// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)

static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))

/*
 * Page order with respect to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif
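/*
 * Illustrative values (assuming x86-64 with 4K base pages): with
 * CONFIG_TRANSPARENT_HUGEPAGE, COMPACTION_HPAGE_ORDER is
 * HPAGE_PMD_ORDER = 9, i.e. the fragmentation score tracks the
 * availability of 2MB (512-page) blocks.
 */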
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page)
{
	const struct movable_operations *mops;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return false;

	mops = page_movable_ops(page);
	if (mops)
		return true;

	return false;
}

void __SetPageMovable(struct page *page, const struct movable_operations *mops)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);
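/*
 * Illustrative driver-side usage (a sketch, not part of this file):
 * a driver that implements movable_operations (isolate_page,
 * migrate_page and putback_page callbacks) tags its pages while
 * holding the page lock, e.g.:
 *
 *	lock_page(page);
 *	__SetPageMovable(page, &my_driver_mops);
 *	unlock_page(page);
 *
 * where "my_driver_mops" is a hypothetical movable_operations
 * instance supplied by the driver.
 */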
void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * This page still has the type of a movable page, but it's
	 * actually not movable any more.
	 */
	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The next 1 << compact_defer_shift compactions
 * are skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}
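/*
 * Worked example of the deferral arithmetic above: each failure bumps
 * compact_defer_shift, so defer_limit doubles (2, 4, 8, ...) and
 * saturates at 1 << COMPACT_MAX_DEFER_SHIFT = 64. With a shift of 6,
 * compaction_deferred() keeps returning true until compact_considered
 * has counted up to 64 calls, after which compaction is tried again.
 */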
/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

#ifdef CONFIG_SPARSEMEM
/*
 * If the PFN falls into an offline section, return the start PFN of the
 * next online section. If the PFN falls into an online section or if
 * there is no next online section, return 0.
 */
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
	unsigned long start_nr = pfn_to_section_nr(start_pfn);

	if (online_section_nr(start_nr))
		return 0;

	while (++start_nr <= __highest_present_section_nr) {
		if (online_section_nr(start_nr))
			return section_nr_to_pfn(start_nr);
	}

	return 0;
}
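/*
 * Illustration (assuming x86-64 defaults: SECTION_SIZE_BITS == 27 and
 * 4K pages): a section covers 32768 PFNs, so a start_pfn inside an
 * offline section is advanced to the first PFN of the next online
 * section, i.e. to a multiple of 32768.
 */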
/*
 * If the PFN falls into an offline section, return the end PFN of the
 * next online section in reverse. If the PFN falls into an online section
 * or if there is no next online section in reverse, return 0.
 */
static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
{
	unsigned long start_nr = pfn_to_section_nr(start_pfn);

	if (!start_nr || online_section_nr(start_nr))
		return 0;

	while (start_nr-- > 0) {
		if (online_section_nr(start_nr))
			return section_nr_to_pfn(start_nr) + PAGES_PER_SECTION;
	}

	return 0;
}
#else
static unsigned long skip_offline_sections(unsigned long start_pfn)
{
	return 0;
}

static unsigned long skip_offline_sections_reverse(unsigned long start_pfn)
{
	return 0;
}
#endif

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
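/*
 * Sketch of the walk above (illustrative): migrate_pfn starts at the
 * zone head and advances one pageblock per iteration while free_pfn
 * starts at the zone tail and retreats, so each scanner's restart hint
 * is reset from its own end of the zone and the loop stops where the
 * two walks meet.
 */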
/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
	bool skip;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	pfn = pageblock_end_pfn(pfn);

	/* Update where async and sync compaction should restart */
	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	set_pageblock_skip(page);

	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock.
 * It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending.
 * Returns false when compaction can continue.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
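/*
 * Callers pair this with a periodic check, once per COMPACT_CLUSTER_MAX
 * (SWAP_CLUSTER_MAX, typically 32) pages scanned, so IRQs are
 * re-enabled at a bounded interval; see the scan loops below.
 */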
/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *page;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	page = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, page += stride) {
		int isolated;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending.
		 */
		if (!(blockpfn % COMPACT_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order <= MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				page += (1UL << order) - 1;
				nr_scanned += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking. */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		nr_scanned += isolated - 1;
		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		page += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * the scanning range to the right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
		}

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
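/*
 * Illustrative caller pattern (a sketch; the in-tree user is the
 * contiguous allocation path, e.g. alloc_contig_range()): either the
 * whole aligned range is isolated, or the call fails atomically.
 *
 *	outer_end = isolate_freepages_range(cc, start, end);
 *	if (!outer_end)
 *		return -EBUSY;	/- some page in the range was not free -/
 */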
/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct compact_control *cc)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	bool too_many;

	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	/*
	 * Allow GFP_NOFS to isolate past the limit set for regular
	 * compaction runs. This prevents an ABBA deadlock when other
	 * compactors have already isolated to the limit, but are
	 * blocked on filesystem locks held by the GFP_NOFS thread.
	 */
	if (cc->gfp_mask & __GFP_FS) {
		inactive >>= 3;
		active >>= 3;
	}

	too_many = isolated > (inactive + active) / 2;
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}
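/*
 * Worked example of the threshold above: with 800 inactive and 200
 * active pages on the node, a __GFP_FS compactor is throttled once
 * more than (100 + 25) / 2 = 62 pages are isolated, while a GFP_NOFS
 * one may isolate up to (800 + 200) / 2 = 500 pages first.
 */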
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @mode:	Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns errno, like -EAGAIN or -EINTR in case of e.g. a pending signal
 * or congestion, -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct folio *folio = NULL;
	struct page *page = NULL, *valid_page = NULL;
	struct address_space *mapping;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(cc))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && pageblock_aligned(low_pfn)) {
			if (!isolation_suitable(cc, page)) {
				low_pfn = end_pfn;
				folio = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				/* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += compound_nr(page) - 1;
				nr_scanned += compound_nr(page) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				folio = page_folio(page);
				low_pfn += folio_nr_pages(folio) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without the zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order <= MAX_ORDER) {
				low_pfn += (1UL << freepage_order) - 1;
				nr_scanned += (1UL << freepage_order) - 1;
			}
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order <= MAX_ORDER)) {
				low_pfn += (1UL << order) - 1;
				nr_scanned += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (isolate_movable_page(page, mode)) {
					folio = page_folio(page);
					goto isolate_success;
				}
			}

			goto isolate_fail;
		}

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		folio = folio_get_nontail_page(page);
		if (unlikely(!folio))
			goto isolate_fail;

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		mapping = folio_mapping(folio);
		if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio))
			goto isolate_fail_put;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && mapping)
			goto isolate_fail_put;

		/* Only take pages on LRU: a check now makes later tests safe */
		if (!folio_test_lru(folio))
			goto isolate_fail_put;

		/* Compaction might skip unevictable pages but CMA takes them */
		if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio))
			goto isolate_fail_put;

		/*
		 * To minimise LRU disruption, the caller can indicate with
		 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
		 * it will be able to migrate without blocking - clean pages
		 * for the most part. PageWriteback would require blocking.
		 */
		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
			goto isolate_fail_put;

		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) {
			bool migrate_dirty;

			/*
			 * Only folios without mappings or that have
			 * a ->migrate_folio callback are possible to
			 * migrate without blocking. However, we may
			 * be racing with truncation, which can free
			 * the mapping.
			 * Truncation holds the folio lock
			 * until after the folio is removed from the page
			 * cache so holding it ourselves is sufficient.
			 */
			if (!folio_trylock(folio))
				goto isolate_fail_put;

			mapping = folio_mapping(folio);
			migrate_dirty = !mapping ||
					mapping->a_ops->migrate_folio;
			folio_unlock(folio);
			if (!migrate_dirty)
				goto isolate_fail_put;
		}

		/* Try to isolate the folio */
		if (!folio_test_clear_lru(folio))
			goto isolate_fail_put;

		lruvec = folio_lruvec(folio);

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, folio);

			/*
			 * Try get exclusive access under lock. If marked for
			 * skip, the scan is aborted unless the current context
			 * is a rescan to reach the end of the pageblock.
			 */
			if (!skip_updated && valid_page) {
				skip_updated = true;
				if (test_and_set_skip(cc, valid_page) &&
				    !cc->finish_pageblock) {
					low_pfn = end_pfn;
					goto isolate_abort;
				}
			}

			/*
			 * The folio may have become large since the
			 * non-locked check, and it's on the LRU.
			/*
			 * Check whether the folio became large since the
			 * non-locked check; it is on the LRU in that case.
			 */
			if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) {
				low_pfn += folio_nr_pages(folio) - 1;
				nr_scanned += folio_nr_pages(folio) - 1;
				folio_set_lru(folio);
				goto isolate_fail_put;
			}
		}

		/* The folio is taken off the LRU */
		if (folio_test_large(folio))
			low_pfn += folio_nr_pages(folio) - 1;

		/* Successfully isolated */
		lruvec_del_folio(lruvec, folio);
		node_stat_mod_folio(folio,
				NR_ISOLATED_ANON + folio_is_file_lru(folio),
				folio_nr_pages(folio));

isolate_success:
		list_add(&folio->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += folio_nr_pages(folio);
		nr_isolated += folio_nr_pages(folio);
		nr_scanned += folio_nr_pages(folio) - 1;

		/*
		 * Avoid isolating too much unless this block is being
		 * fully scanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->finish_pageblock && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		folio_put(folio);

isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;
		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}

		if (ret == -ENOMEM)
			break;
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	folio = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (folio) {
		folio_set_lru(folio);
		folio_put(folio);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) {
		if (!cc->no_set_skip_hint && valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	cc->migrate_pfn = low_pfn;

	return ret;
}
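/*
 * Illustrative note (not part of the original source), assuming
 * COMPACT_CLUSTER_MAX == SWAP_CLUSTER_MAX == 32: the batching check above
 * normally stops a scan once 32 base pages sit on cc->migratepages, so the
 * batch can be migrated before more pages are isolated; only a
 * finish_pageblock rescan keeps isolating past that point so the whole
 * pageblock gets covered.
 */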
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
 * in case we could not allocate a page, or 0.
 */
int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;
	int ret = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
						 ISOLATE_UNEVICTABLE);

		if (ret)
			break;

		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}

	return ret;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}
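/*
 * Illustrative example (not in the original source): an async direct
 * compaction on behalf of a MIGRATE_UNMOVABLE allocation only accepts
 * MIGRATE_UNMOVABLE pageblocks as migration sources, while a
 * MIGRATE_MOVABLE request also accepts CMA blocks because
 * is_migrate_movable() covers both MIGRATE_MOVABLE and MIGRATE_CMA.
 * Sync or non-direct (kcompactd/proactive) scans take any pageblock that
 * is not persistently marked for skipping.
 */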
/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for a
		 * valid range.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
}
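/*
 * Illustrative arithmetic (not part of the original source), assuming
 * COMPACT_CLUSTER_MAX == SWAP_CLUSTER_MAX == 32: the fast-search budget
 * roughly halves with every consecutive failure,
 *
 *	fast_search_fail:  0   1   2   3   4   5   >=6
 *	scan limit:       33  17   9   5   3   2     1
 *
 * so a fast search that keeps failing quickly degrades to checking a
 * single free page before the caller falls back to the linear scan.
 */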
/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}
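/*
 * Illustrative example (not in the original source): with pageblock_order
 * == 9, a migration scanner at pfn 0x1205 and a free scanner at pfn 0x13f0
 * both sit in pageblock 9, so compact_scanners_met() returns true and the
 * compaction run is considered complete.
 */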
/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list such that the unscanned pages are scanned
 * first on the next iteration of the free scanner.
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_last(freelist, &freepage->lru)) {
		list_cut_before(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_first(freelist, &freepage->lru)) {
		list_cut_position(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn)
{
	unsigned long start_pfn, end_pfn;
	struct page *page;

	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;

	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;

	/* Pageblock boundaries */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));

	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;

	isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);

	/* Skip this pageblock in the future as it's full or nearly full */
	if (start_pfn == end_pfn)
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}
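/*
 * Illustrative walk-through (not in the original source): with cc->order
 * == 4 and cc->search_order == 2, fast_isolate_freepages() below visits
 * the free lists in the order 2, 1, 0, 3. The next call to
 * next_search_order() then sees order == cc->search_order, rotates
 * cc->search_order down to 1 for the following attempt, and returns -1 to
 * terminate the current search.
 */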
static void fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0, total_isolated = 0;
	unsigned long low_pfn, min_pfn, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure.
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;
		unsigned long high_pfn = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry_reverse(freepage, freelist, buddy_list) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			if (pfn >= highest)
				highest = max(pageblock_start_pfn(pfn),
					      cc->zone->zone_start_pfn);

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}
		/* Use a maximum candidate pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				nr_scanned += nr_isolated - 1;
				total_isolated += nr_isolated;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/* Skip fast search if enough freepages isolated */
		if (cc->nr_freepages >= cc->nr_migratepages)
			break;

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = max(1U, limit >> 1);
	}

	trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn,
						   nr_scanned, total_isolated);

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest >= min_pfn) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pageblock_pfn_to_page(min_pfn,
						min(pageblock_end_pfn(min_pfn),
						    zone_end_pfn(cc->zone)),
						cc->zone);
					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn);
}
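/*
 * Illustrative example (not in the original source): if the migration
 * scanner is at pfn 0x0 and the free scanner at pfn 0x8000, the distance
 * is 0x8000, so the fast search above prefers candidates in the top
 * quarter of the scan space (pfn >= 0x6000) and only settles for one in
 * the top half (pfn >= 0x4000) when the preferred window yields nothing.
 */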
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		goto splitmap;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check whether
		 * we need to reschedule.
		 */
		if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();
		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page) {
			unsigned long next_pfn;

			next_pfn = skip_offline_sections_reverse(block_start_pfn);
			if (next_pfn)
				block_start_pfn = max(next_pfn, low_pfn);

			continue;
		}

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn -
					      pageblock_nr_pages);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn.
	 */
	cc->free_pfn = isolate_start_pfn;

splitmap:
	/* __isolate_free_page() does not map the pages */
	split_map_pages(freelist);
}
/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct folio *compaction_alloc(struct folio *src, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct folio *dst;

	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	dst = list_entry(cc->freepages.next, struct folio, lru);
	list_del(&dst->lru);
	cc->nr_freepages--;

	return dst;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct folio *dst, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&dst->lru, &cc->freepages);
	cc->nr_freepages++;
}
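/*
 * Usage sketch (illustrative, not part of the original source): these two
 * callbacks form the allocate/free pair that compact_zone() is expected to
 * hand to migrate_pages(), with cc passed through the opaque data argument,
 * roughly:
 *
 *	migrate_pages(&cc->migratepages, compaction_alloc,
 *		      compaction_free, (unsigned long)cc,
 *		      cc->mode, MR_COMPACTION, &nr_succeeded);
 *
 * so every migrated folio lands in a page previously isolated by the free
 * scanner, and pages left unused are returned to cc->freepages.
 */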
/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;
/*
 * Tunable for proactive compaction. It determines how
 * aggressively the kernel should compact memory in the
 * background. It takes values in the range [0, 100].
 */
static unsigned int __read_mostly sysctl_compaction_proactiveness = 20;
static int sysctl_extfrag_threshold = 500;
static int __read_mostly sysctl_compact_memory;

static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	if (!cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;

	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
		return cc->migrate_pfn;

	cc->migrate_pfn = cc->fast_start_pfn;
	cc->fast_start_pfn = ULONG_MAX;

	return cc->migrate_pfn;
}
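/*
 * Illustrative lifecycle (not in the original source): fast_start_pfn
 * tracks the lowest pfn the fast search has handed out. While the fast
 * search keeps succeeding, the cache only ever moves downwards via min().
 * Once the fast path fails, reinit_migrate_pfn() restarts the linear
 * migration scan from that cached low point and resets the cache to the
 * ULONG_MAX sentinel, which also makes update_fast_start_pfn() a no-op
 * until the cache is re-armed.
 */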
/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;
	bool found_block = false;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the pageblock should be finished then do not select a different
	 * pageblock.
	 */
	if (cc->finish_pageblock)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try to pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, buddy_list) {
			unsigned long free_pfn;

			if (nr_scanned++ >= limit) {
				move_freelist_tail(freelist, freepage);
				break;
			}

			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage))
					continue;

				/* Reorder so a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				if (pfn < cc->zone->zone_start_pfn)
					pfn = cc->zone->zone_start_pfn;
				cc->fast_search_fail = 0;
				found_block = true;
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (!found_block) {
		cc->fast_search_fail++;
		pfn = reinit_migrate_pfn(cc);
	}
	return pfn;
}
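/*
 * Illustrative arithmetic (not in the original source): starting a fresh
 * scan at zone_start_pfn == 0 with the free scanner at pfn 0x8000, the
 * search window is the first half of the space, so high_pfn lands at
 * pageblock 0x4000. On a continuation (say migrate_pfn == 0x1000), the
 * extra >>= 2 narrows the window to an eighth of the remaining space:
 * (0x7000 >> 1) >> 2 == 0xe00, so only free pages below pfn 0x1e00
 * qualify as a fast migration source.
 */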
/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;

	/*
	 * fast_find_migrateblock() has already ensured the pageblock is not
	 * set with a skipped flag, so to avoid the isolation_suitable check
	 * below again, check whether the fast search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			fast_find_block = false,
			cc->migrate_pfn = low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule.
		 */
		if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn,
						block_end_pfn, cc->zone);
		if (!page) {
			unsigned long next_pfn;

			next_pfn = skip_offline_sections(block_start_pfn);
			if (next_pfn)
				block_end_pfn = min(next_pfn, cc->free_pfn);
			continue;
		}

		/*
		 * If isolation recently failed, do not retry. Only check the
		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
		 * to be visited multiple times. Assume skip was checked
		 * before making it "skip" so other compaction instances do
		 * not scan the same block.
		 */
		if (pageblock_aligned(low_pfn) &&
		    !fast_find_block && !isolation_suitable(cc, page))
			continue;
		/*
		 * For async direct compaction, only scan the pageblocks of the
		 * same migratetype without huge pages. Async direct compaction
		 * is optimistic to see if the minimum amount of work satisfies
		 * the allocation. The cached PFN is updated as it's possible
		 * that all remaining blocks between source and target are
		 * unsuitable and the compaction scanners fail to meet.
		 */
		if (!suitable_migration_source(cc, page)) {
			update_cached_migrate(cc, block_end_pfn);
			continue;
		}

		/* Perform the isolation */
		if (isolate_migratepages_block(cc, low_pfn, block_end_pfn,
						isolate_mode))
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

/*
 * Determine whether kswapd is (or recently was!) running on this node.
 *
 * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
 * zero it.
 */
static bool kswapd_is_running(pg_data_t *pgdat)
{
	bool running;

	pgdat_kswapd_lock(pgdat);
	running = pgdat->kswapd && task_is_running(pgdat->kswapd);
	pgdat_kswapd_unlock(pgdat);

	return running;
}

/*
 * A zone's fragmentation score is the external fragmentation with respect
 * to the COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100].
 */
static unsigned int fragmentation_score_zone(struct zone *zone)
{
	return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
}

/*
 * A weighted zone's fragmentation score is the external fragmentation
 * with respect to the COMPACTION_HPAGE_ORDER scaled by the zone's size. It
 * returns a value in the range [0, 100].
 *
 * The scaling factor ensures that proactive compaction focuses on larger
 * zones like ZONE_NORMAL, rather than smaller, specialized zones like
 * ZONE_DMA32. For smaller zones, the score value remains close to zero,
 * and thus never exceeds the high threshold for proactive compaction.
 */
static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
{
	unsigned long score;

	score = zone->present_pages * fragmentation_score_zone(zone);
	return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
}
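/*
 * Illustrative arithmetic (not in the original source): on a node where
 * ZONE_NORMAL holds 3/4 of the node's present pages and ZONE_DMA32 the
 * remaining 1/4, a raw extfrag score of 80 contributes about 60 to the
 * node score when it comes from ZONE_NORMAL but only about 20 when it
 * comes from ZONE_DMA32, so only sustained fragmentation in the big zone
 * can push the node over the proactive threshold.
 */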
/*
 * The per-node proactive (background) compaction process is started by its
 * corresponding kcompactd thread when the node's fragmentation score
 * exceeds the high threshold. The compaction process remains active until
 * the node's score falls below the low threshold, or one of the back-off
 * conditions is met.
 */
static unsigned int fragmentation_score_node(pg_data_t *pgdat)
{
	unsigned int score = 0;
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;
		score += fragmentation_score_zone_weighted(zone);
	}

	return score;
}

static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low)
{
	unsigned int wmark_low;

	/*
	 * Cap the low watermark to avoid excessive compaction
	 * activity in case a user sets the proactiveness tunable
	 * close to 100 (maximum).
	 */
	wmark_low = max(100U - sysctl_compaction_proactiveness, 5U);
	return low ? wmark_low : min(wmark_low + 10, 100U);
}

static bool should_proactive_compact_node(pg_data_t *pgdat)
{
	int wmark_high;

	if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat))
		return false;

	wmark_high = fragmentation_score_wmark(pgdat, false);
	return fragmentation_score_node(pgdat) > wmark_high;
}
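/*
 * Illustrative arithmetic (not in the original source): with the default
 * proactiveness of 20, the low watermark is 100 - 20 = 80 and the high
 * watermark is 90, so kcompactd starts proactive work once the node score
 * exceeds 90 and keeps going until it drops below 80. At the maximum
 * proactiveness of 100, the 5U cap applies, giving watermarks of 5 and 15.
 */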
static enum compact_result __compact_finished(struct compact_control *cc)
{
	unsigned int order;
	const int migratetype = cc->migratetype;
	int ret;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(cc->zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (cc->direct_compaction)
			cc->zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (cc->proactive_compaction) {
		int score, wmark_low;
		pg_data_t *pgdat;

		pgdat = cc->zone->zone_pgdat;
		if (kswapd_is_running(pgdat))
			return COMPACT_PARTIAL_SKIPPED;

		score = fragmentation_score_zone(cc->zone);
		wmark_low = fragmentation_score_wmark(pgdat, true);

		if (score > wmark_low)
			ret = COMPACT_CONTINUE;
		else
			ret = COMPACT_SUCCESS;

		goto out;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/*
	 * Always finish scanning a pageblock to reduce the possibility of
	 * fallbacks in the future. This is particularly important when
	 * migration source is unmovable/reclaimable but it's not worth
	 * special casing.
	 */
	if (!pageblock_aligned(cc->migrate_pfn))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	ret = COMPACT_NO_SUITABLE_PAGE;
	for (order = cc->order; order <= MAX_ORDER; order++) {
		struct free_area *area = &cc->zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!free_area_empty(area, migratetype))
			return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
		    !free_area_empty(area, MIGRATE_CMA))
			return COMPACT_SUCCESS;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			/*
			 * Movable pages are OK in any pageblock. If we are
			 * stealing for a non-movable allocation, make sure
			 * we finish compacting the current pageblock first
			 * (which is assured by the above migrate_pfn align
			 * check) so it is as free as possible and we won't
			 * have to steal another one soon.
			 */
			return COMPACT_SUCCESS;
	}

out:
	if (cc->contended || fatal_signal_pending(current))
		ret = COMPACT_CONTENDED;

	return ret;
}

static enum compact_result compact_finished(struct compact_control *cc)
{
	int ret;

	ret = __compact_finished(cc);
	trace_mm_compaction_finished(cc->zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}
22452149cdaeSJoonsoo Kim */ 22462149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 2247fa599c44SMiaohe Lin true, &can_steal) != -1) 2248baf6a9a1SVlastimil Babka /* 2249fa599c44SMiaohe Lin * Movable pages are OK in any pageblock. If we are 2250fa599c44SMiaohe Lin * stealing for a non-movable allocation, make sure 2251fa599c44SMiaohe Lin * we finish compacting the current pageblock first 2252fa599c44SMiaohe Lin * (which is assured by the above migrate_pfn align 2253fa599c44SMiaohe Lin * check) so it is as free as possible and we won't 2254fa599c44SMiaohe Lin * have to steal another one soon. 2255baf6a9a1SVlastimil Babka */ 2256baf6a9a1SVlastimil Babka return COMPACT_SUCCESS; 2257baf6a9a1SVlastimil Babka } 2258baf6a9a1SVlastimil Babka 2259facdaa91SNitin Gupta out: 2260cb2dcaf0SMel Gorman if (cc->contended || fatal_signal_pending(current)) 2261cb2dcaf0SMel Gorman ret = COMPACT_CONTENDED; 2262cb2dcaf0SMel Gorman 2263cb2dcaf0SMel Gorman return ret; 2264837d026dSJoonsoo Kim } 2265837d026dSJoonsoo Kim 226640cacbcbSMel Gorman static enum compact_result compact_finished(struct compact_control *cc) 2267837d026dSJoonsoo Kim { 2268837d026dSJoonsoo Kim int ret; 2269837d026dSJoonsoo Kim 227040cacbcbSMel Gorman ret = __compact_finished(cc); 227140cacbcbSMel Gorman trace_mm_compaction_finished(cc->zone, cc->order, ret); 2272837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 2273837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 2274837d026dSJoonsoo Kim 2275837d026dSJoonsoo Kim return ret; 2276748446bbSMel Gorman } 2277748446bbSMel Gorman 22783cf04937SJohannes Weiner static bool __compaction_suitable(struct zone *zone, int order, 227997a225e6SJoonsoo Kim int highest_zoneidx, 228086a294a8SMichal Hocko unsigned long wmark_target) 22813e7d3449SMel Gorman { 22823e7d3449SMel Gorman unsigned long watermark; 22833957c776SMichal Hocko /* 22849861a62cSVlastimil Babka * Watermarks for order-0 must be met for compaction to be able to 2285984fdba6SVlastimil Babka * isolate free pages for migration targets. This means that the 2286984fdba6SVlastimil Babka * watermark and alloc_flags have to match, or be more pessimistic than 2287984fdba6SVlastimil Babka * the check in __isolate_free_page(). We don't use the direct 2288984fdba6SVlastimil Babka * compactor's alloc_flags, as they are not relevant for freepage 228997a225e6SJoonsoo Kim * isolation. We however do use the direct compactor's highest_zoneidx 229097a225e6SJoonsoo Kim * to skip over zones where lowmem reserves would prevent allocation 229197a225e6SJoonsoo Kim * even if compaction succeeds. 22928348faf9SVlastimil Babka * For costly orders, we require low watermark instead of min for 22938348faf9SVlastimil Babka * compaction to proceed to increase its chances. 2294d883c6cfSJoonsoo Kim * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2295d883c6cfSJoonsoo Kim * suitable migration targets 22963e7d3449SMel Gorman */ 22978348faf9SVlastimil Babka watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 22988348faf9SVlastimil Babka low_wmark_pages(zone) : min_wmark_pages(zone); 22998348faf9SVlastimil Babka watermark += compact_gap(order); 23003cf04937SJohannes Weiner return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 23013cf04937SJohannes Weiner ALLOC_CMA, wmark_target); 2302cc5c9f09SVlastimil Babka } 2303cc5c9f09SVlastimil Babka 23042b1a20c3SHui Su /* 23052b1a20c3SHui Su * compaction_suitable: Is this suitable to run compaction on this zone now? 
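 *
 * The order-0 watermark check in __compaction_suitable() above leaves
 * headroom for migration targets; schematically (a sketch, assuming
 * compact_gap(order) is 2UL << order as in current kernels):
 *
 *	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
 *			low_wmark_pages(zone) : min_wmark_pages(zone);
 *	watermark += 2UL << order;	/* compact_gap(): migration headroom */
 *	suitable = __zone_watermark_ok(zone, 0, watermark,
 *				       highest_zoneidx, ALLOC_CMA, wmark_target);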
23062b1a20c3SHui Su */ 23073cf04937SJohannes Weiner bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) 2308cc5c9f09SVlastimil Babka { 23093cf04937SJohannes Weiner enum compact_result compact_result; 23103cf04937SJohannes Weiner bool suitable; 2311cc5c9f09SVlastimil Babka 23123cf04937SJohannes Weiner suitable = __compaction_suitable(zone, order, highest_zoneidx, 2313cc5c9f09SVlastimil Babka zone_page_state(zone, NR_FREE_PAGES)); 23143e7d3449SMel Gorman /* 23153e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 23163e7d3449SMel Gorman * low memory or external fragmentation 23173e7d3449SMel Gorman * 2318ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 2319ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 23203e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 23213e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 23223e7d3449SMel Gorman * 232320311420SVlastimil Babka * Only compact if a failure would be due to fragmentation. Also 232420311420SVlastimil Babka * ignore fragindex for non-costly orders where the alternative to 232520311420SVlastimil Babka * a successful reclaim/compaction is OOM. Fragindex and the 232620311420SVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent 232720311420SVlastimil Babka * excessive compaction for costly orders, but it should not be at the 232820311420SVlastimil Babka * expense of system stability. 23293e7d3449SMel Gorman */ 23303cf04937SJohannes Weiner if (suitable) { 23313cf04937SJohannes Weiner compact_result = COMPACT_CONTINUE; 23323cf04937SJohannes Weiner if (order > PAGE_ALLOC_COSTLY_ORDER) { 23333cf04937SJohannes Weiner int fragindex = fragmentation_index(zone, order); 23343cf04937SJohannes Weiner 23353cf04937SJohannes Weiner if (fragindex >= 0 && 23363cf04937SJohannes Weiner fragindex <= sysctl_extfrag_threshold) { 23373cf04937SJohannes Weiner suitable = false; 23383cf04937SJohannes Weiner compact_result = COMPACT_NOT_SUITABLE_ZONE; 23393cf04937SJohannes Weiner } 23403cf04937SJohannes Weiner } 23413cf04937SJohannes Weiner } else { 23423cf04937SJohannes Weiner compact_result = COMPACT_SKIPPED; 23433e7d3449SMel Gorman } 23443e7d3449SMel Gorman 23453cf04937SJohannes Weiner trace_mm_compaction_suitable(zone, order, compact_result); 2346837d026dSJoonsoo Kim 23473cf04937SJohannes Weiner return suitable; 2348837d026dSJoonsoo Kim } 2349837d026dSJoonsoo Kim 235086a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 235186a294a8SMichal Hocko int alloc_flags) 235286a294a8SMichal Hocko { 235386a294a8SMichal Hocko struct zone *zone; 235486a294a8SMichal Hocko struct zoneref *z; 235586a294a8SMichal Hocko 235686a294a8SMichal Hocko /* 235786a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 235886a294a8SMichal Hocko * retrying the reclaim. 
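 *
 * For reference, fragmentation_index() consulted above returns values in
 * [-1000, 1000]: -1000 means a suitably sized free block already exists,
 * values near 0 point at a genuine lack of memory, and values near 1000
 * at external fragmentation. A sketch of the resulting policy:
 *
 *	if (order > PAGE_ALLOC_COSTLY_ORDER) {
 *		int fragindex = fragmentation_index(zone, order);
 *
 *		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
 *			suitable = false;	/* reclaim instead of compact */
 *	}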
235986a294a8SMichal Hocko 	 */
236097a225e6SJoonsoo Kim 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
236197a225e6SJoonsoo Kim 					ac->highest_zoneidx, ac->nodemask) {
236286a294a8SMichal Hocko 		unsigned long available;
236386a294a8SMichal Hocko 
236486a294a8SMichal Hocko 		/*
236586a294a8SMichal Hocko 		 * Do not consider all the reclaimable memory because we do not
236686a294a8SMichal Hocko 		 * want to thrash just for a single high order allocation which
236786a294a8SMichal Hocko 		 * is not even guaranteed to appear even if __compaction_suitable
236886a294a8SMichal Hocko 		 * is happy about the watermark check.
236986a294a8SMichal Hocko 		 */
23705a1c84b4SMel Gorman 		available = zone_reclaimable_pages(zone) / order;
237186a294a8SMichal Hocko 		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
2372e8606320SJohannes Weiner 		if (__compaction_suitable(zone, order, ac->highest_zoneidx,
23733cf04937SJohannes Weiner 					  available))
237486a294a8SMichal Hocko 			return true;
237586a294a8SMichal Hocko 	}
237686a294a8SMichal Hocko 
237786a294a8SMichal Hocko 	return false;
237886a294a8SMichal Hocko }
237986a294a8SMichal Hocko 
23805e1f0f09SMel Gorman static enum compact_result
23815e1f0f09SMel Gorman compact_zone(struct compact_control *cc, struct capture_control *capc)
2382748446bbSMel Gorman {
2383ea7ab982SMichal Hocko 	enum compact_result ret;
238440cacbcbSMel Gorman 	unsigned long start_pfn = cc->zone->zone_start_pfn;
238540cacbcbSMel Gorman 	unsigned long end_pfn = zone_end_pfn(cc->zone);
2386566e54e1SMel Gorman 	unsigned long last_migrated_pfn;
2387e0b9daebSDavid Rientjes 	const bool sync = cc->mode != MIGRATE_ASYNC;
23888854c55fSMel Gorman 	bool update_cached;
238984b328aaSBaolin Wang 	unsigned int nr_succeeded = 0;
2390748446bbSMel Gorman 
2391a94b5252SYafang Shao 	/*
2392a94b5252SYafang Shao 	 * These counters track activities during zone compaction. Initialize
2393a94b5252SYafang Shao 	 * them before compacting a new zone.
2394a94b5252SYafang Shao 	 */
2395a94b5252SYafang Shao 	cc->total_migrate_scanned = 0;
2396a94b5252SYafang Shao 	cc->total_free_scanned = 0;
2397a94b5252SYafang Shao 	cc->nr_migratepages = 0;
2398a94b5252SYafang Shao 	cc->nr_freepages = 0;
2399a94b5252SYafang Shao 	INIT_LIST_HEAD(&cc->freepages);
2400a94b5252SYafang Shao 	INIT_LIST_HEAD(&cc->migratepages);
2401a94b5252SYafang Shao 
240201c0bfe0SWei Yang 	cc->migratetype = gfp_migratetype(cc->gfp_mask);
2403e8606320SJohannes Weiner 
2404e8606320SJohannes Weiner 	if (!is_via_compact_memory(cc->order)) {
2405e8606320SJohannes Weiner 		unsigned long watermark;
2406e8606320SJohannes Weiner 
2407e8606320SJohannes Weiner 		/* Allocation can already succeed, nothing to do */
2408e8606320SJohannes Weiner 		watermark = wmark_pages(cc->zone,
2409e8606320SJohannes Weiner 					cc->alloc_flags & ALLOC_WMARK_MASK);
2410e8606320SJohannes Weiner 		if (zone_watermark_ok(cc->zone, cc->order, watermark,
2411e8606320SJohannes Weiner 				      cc->highest_zoneidx, cc->alloc_flags))
2412e8606320SJohannes Weiner 			return COMPACT_SUCCESS;
2413e8606320SJohannes Weiner 
24143e7d3449SMel Gorman 		/* Compaction is likely to fail */
24153cf04937SJohannes Weiner 		if (!compaction_suitable(cc->zone, cc->order,
24163cf04937SJohannes Weiner 					 cc->highest_zoneidx))
24173cf04937SJohannes Weiner 			return COMPACT_SKIPPED;
2418e8606320SJohannes Weiner 	}
2419c46649deSMichal Hocko 
2420c89511abSMel Gorman 	/*
2421d3132e4bSVlastimil Babka 	 * Clear pageblock skip if there were failures recently and compaction
2422accf6242SVlastimil Babka 	 * is about to be retried after being deferred.
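 *
 * "Retried after being deferred" is tracked per zone with a capped
 * counter; roughly (a sketch of the compaction_restarting() logic,
 * assuming the usual defer bookkeeping fields):
 *
 *	static bool compaction_restarting(struct zone *zone, int order)
 *	{
 *		if (order < zone->compact_order_failed)
 *			return false;
 *		return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
 *			zone->compact_considered >= 1UL << zone->compact_defer_shift;
 *	}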
2423d3132e4bSVlastimil Babka 	 */
242440cacbcbSMel Gorman 	if (compaction_restarting(cc->zone, cc->order))
242540cacbcbSMel Gorman 		__reset_isolation_suitable(cc->zone);
2426d3132e4bSVlastimil Babka 
2427d3132e4bSVlastimil Babka 	/*
2428c89511abSMel Gorman 	 * Set up to move all movable pages to the end of the zone. Use cached
242906ed2998SVlastimil Babka 	 * information on where the scanners should start (unless we explicitly
243006ed2998SVlastimil Babka 	 * want to compact the whole zone), but check that it is initialised
243106ed2998SVlastimil Babka 	 * by ensuring the values are within zone boundaries.
2432c89511abSMel Gorman 	 */
243370b44595SMel Gorman 	cc->fast_start_pfn = 0;
243406ed2998SVlastimil Babka 	if (cc->whole_zone) {
243506ed2998SVlastimil Babka 		cc->migrate_pfn = start_pfn;
243606ed2998SVlastimil Babka 		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
243706ed2998SVlastimil Babka 	} else {
243840cacbcbSMel Gorman 		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
243940cacbcbSMel Gorman 		cc->free_pfn = cc->zone->compact_cached_free_pfn;
2440623446e4SJoonsoo Kim 		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
244106b6640aSVlastimil Babka 			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
244240cacbcbSMel Gorman 			cc->zone->compact_cached_free_pfn = cc->free_pfn;
2443c89511abSMel Gorman 		}
2444623446e4SJoonsoo Kim 		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
2445c89511abSMel Gorman 			cc->migrate_pfn = start_pfn;
244640cacbcbSMel Gorman 			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
244740cacbcbSMel Gorman 			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
2448c89511abSMel Gorman 		}
2449c8f7de0bSMichal Hocko 
2450e332f741SMel Gorman 		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
2451c8f7de0bSMichal Hocko 			cc->whole_zone = true;
245206ed2998SVlastimil Babka 	}
2453c8f7de0bSMichal Hocko 
2454566e54e1SMel Gorman 	last_migrated_pfn = 0;
2455748446bbSMel Gorman 
24568854c55fSMel Gorman 	/*
24578854c55fSMel Gorman 	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
24588854c55fSMel Gorman 	 * the basis that some migrations will fail in ASYNC mode. However,
24598854c55fSMel Gorman 	 * if the cached PFNs match and pageblocks are skipped due to having
24608854c55fSMel Gorman 	 * no isolation candidates, then the sync state does not matter.
24618854c55fSMel Gorman 	 * Until a pageblock with isolation candidates is found, keep the
24628854c55fSMel Gorman 	 * cached PFNs in sync to avoid revisiting the same blocks.
24638854c55fSMel Gorman 	 */
24648854c55fSMel Gorman 	update_cached = !sync &&
24658854c55fSMel Gorman 		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
24668854c55fSMel Gorman 
2467abd4349fSBaolin Wang 	trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync);
24680eb927c0SMel Gorman 
2469361a2a22SMinchan Kim 	/* lru_add_drain_all could be expensive as it involves other CPUs */
2470361a2a22SMinchan Kim 	lru_add_drain();
2471748446bbSMel Gorman 
247240cacbcbSMel Gorman 	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
24739d502c1cSMinchan Kim 		int err;
247419d3cf9dSYanfei Xu 		unsigned long iteration_start_pfn = cc->migrate_pfn;
2475748446bbSMel Gorman 
2476804d3121SMel Gorman 		/*
247748731c84SMel Gorman 		 * Avoid multiple rescans of the same pageblock which can
247848731c84SMel Gorman 		 * happen if a page cannot be isolated (dirty/writeback in
247948731c84SMel Gorman 		 * async mode) or if the migrated pages are being allocated
248048731c84SMel Gorman 		 * before the pageblock is cleared.
The first rescan will 248148731c84SMel Gorman * capture the entire pageblock for migration. If it fails, 248248731c84SMel Gorman * it'll be marked skip and scanning will proceed as normal. 2483804d3121SMel Gorman */ 248448731c84SMel Gorman cc->finish_pageblock = false; 2485804d3121SMel Gorman if (pageblock_start_pfn(last_migrated_pfn) == 248619d3cf9dSYanfei Xu pageblock_start_pfn(iteration_start_pfn)) { 248748731c84SMel Gorman cc->finish_pageblock = true; 2488804d3121SMel Gorman } 2489804d3121SMel Gorman 2490cfccd2e6SMel Gorman rescan: 249132aaf055SPengfei Li switch (isolate_migratepages(cc)) { 2492f9e35b3bSMel Gorman case ISOLATE_ABORT: 24932d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 24945733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 2495e64c5237SShaohua Li cc->nr_migratepages = 0; 2496f9e35b3bSMel Gorman goto out; 2497f9e35b3bSMel Gorman case ISOLATE_NONE: 24988854c55fSMel Gorman if (update_cached) { 24998854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = 25008854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0]; 25018854c55fSMel Gorman } 25028854c55fSMel Gorman 2503fdaf7f5cSVlastimil Babka /* 2504fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 2505fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 2506fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 2507fdaf7f5cSVlastimil Babka */ 2508fdaf7f5cSVlastimil Babka goto check_drain; 2509f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 25108854c55fSMel Gorman update_cached = false; 25117c0a84bdSKemeng Shi last_migrated_pfn = max(cc->zone->zone_start_pfn, 25127c0a84bdSKemeng Shi pageblock_start_pfn(cc->migrate_pfn - 1)); 2513f9e35b3bSMel Gorman } 2514748446bbSMel Gorman 2515d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 2516e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 251784b328aaSBaolin Wang MR_COMPACTION, &nr_succeeded); 2518748446bbSMel Gorman 2519abd4349fSBaolin Wang trace_mm_compaction_migratepages(cc, nr_succeeded); 2520748446bbSMel Gorman 2521f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 2522f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 25239d502c1cSMinchan Kim if (err) { 25245733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 25257ed695e0SVlastimil Babka /* 25267ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 25277ed695e0SVlastimil Babka * and we want compact_finished() to detect it 25287ed695e0SVlastimil Babka */ 2529f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 25302d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 25314bf2bba3SDavid Rientjes goto out; 2532748446bbSMel Gorman } 2533fdd048e1SVlastimil Babka /* 2534cfccd2e6SMel Gorman * If an ASYNC or SYNC_LIGHT fails to migrate a page 25359ecc5fc5SMel Gorman * within the current order-aligned block and 25369ecc5fc5SMel Gorman * fast_find_migrateblock may be used then scan the 2537cfccd2e6SMel Gorman * remainder of the pageblock. This will mark the 2538cfccd2e6SMel Gorman * pageblock "skip" to avoid rescanning in the near 2539cfccd2e6SMel Gorman * future. This will isolate more pages than necessary 2540cfccd2e6SMel Gorman * for the request but avoid loops due to 2541cfccd2e6SMel Gorman * fast_find_migrateblock revisiting blocks that were 2542cfccd2e6SMel Gorman * recently partially scanned. 
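 *
 * For the "cc->mode < MIGRATE_SYNC" test below: migrate modes are an
 * ordered enum, so the comparison selects the two lighter modes (a
 * sketch of the ordering assumed here):
 *
 *	enum migrate_mode {
 *		MIGRATE_ASYNC,		/* never blocks */
 *		MIGRATE_SYNC_LIGHT,	/* may block, but not on writeback */
 *		MIGRATE_SYNC,		/* may block and wait */
 *	};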
2543fdd048e1SVlastimil Babka */ 2544539aa041SMel Gorman if (!pageblock_aligned(cc->migrate_pfn) && 25459ecc5fc5SMel Gorman !cc->ignore_skip_hint && !cc->finish_pageblock && 2546cfccd2e6SMel Gorman (cc->mode < MIGRATE_SYNC)) { 2547cfccd2e6SMel Gorman cc->finish_pageblock = true; 2548cfccd2e6SMel Gorman 2549cfccd2e6SMel Gorman /* 2550cfccd2e6SMel Gorman * Draining pcplists does not help THP if 2551cfccd2e6SMel Gorman * any page failed to migrate. Even after 2552cfccd2e6SMel Gorman * drain, the pageblock will not be free. 2553cfccd2e6SMel Gorman */ 2554cfccd2e6SMel Gorman if (cc->order == COMPACTION_HPAGE_ORDER) 2555566e54e1SMel Gorman last_migrated_pfn = 0; 2556cfccd2e6SMel Gorman 2557cfccd2e6SMel Gorman goto rescan; 2558fdd048e1SVlastimil Babka } 25594bf2bba3SDavid Rientjes } 2560fdaf7f5cSVlastimil Babka 256116b3be40SMel Gorman /* Stop if a page has been captured */ 256216b3be40SMel Gorman if (capc && capc->page) { 256316b3be40SMel Gorman ret = COMPACT_SUCCESS; 256416b3be40SMel Gorman break; 256516b3be40SMel Gorman } 256616b3be40SMel Gorman 2567fdaf7f5cSVlastimil Babka check_drain: 2568fdaf7f5cSVlastimil Babka /* 2569fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 2570fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 2571fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 2572fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 2573fdaf7f5cSVlastimil Babka * would succeed. 2574fdaf7f5cSVlastimil Babka */ 2575566e54e1SMel Gorman if (cc->order > 0 && last_migrated_pfn) { 2576fdaf7f5cSVlastimil Babka unsigned long current_block_start = 257706b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 2578fdaf7f5cSVlastimil Babka 2579566e54e1SMel Gorman if (last_migrated_pfn < current_block_start) { 2580b01b2141SIngo Molnar lru_add_drain_cpu_zone(cc->zone); 2581fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 2582566e54e1SMel Gorman last_migrated_pfn = 0; 2583fdaf7f5cSVlastimil Babka } 2584fdaf7f5cSVlastimil Babka } 2585748446bbSMel Gorman } 2586748446bbSMel Gorman 2587f9e35b3bSMel Gorman out: 25886bace090SVlastimil Babka /* 25896bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 25906bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 25916bace090SVlastimil Babka */ 25926bace090SVlastimil Babka if (cc->nr_freepages > 0) { 25936bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 25946bace090SVlastimil Babka 25956bace090SVlastimil Babka cc->nr_freepages = 0; 25966bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 25976bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 259806b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 25996bace090SVlastimil Babka /* 26006bace090SVlastimil Babka * Only go back, not forward. 
The cached pfn might have been
26016bace090SVlastimil Babka 		 * already reset to zone end in compact_finished()
26026bace090SVlastimil Babka 		 */
260340cacbcbSMel Gorman 		if (free_pfn > cc->zone->compact_cached_free_pfn)
260440cacbcbSMel Gorman 			cc->zone->compact_cached_free_pfn = free_pfn;
26056bace090SVlastimil Babka 	}
2606748446bbSMel Gorman 
26077f354a54SDavid Rientjes 	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
26087f354a54SDavid Rientjes 	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);
26097f354a54SDavid Rientjes 
2610abd4349fSBaolin Wang 	trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret);
26110eb927c0SMel Gorman 
2612753ec50dSBaolin Wang 	VM_BUG_ON(!list_empty(&cc->freepages));
2613753ec50dSBaolin Wang 	VM_BUG_ON(!list_empty(&cc->migratepages));
2614753ec50dSBaolin Wang 
2615748446bbSMel Gorman 	return ret;
2616748446bbSMel Gorman }
261776ab0f53SMel Gorman 
2618ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order,
2619c3486f53SVlastimil Babka 		gfp_t gfp_mask, enum compact_priority prio,
262097a225e6SJoonsoo Kim 		unsigned int alloc_flags, int highest_zoneidx,
26215e1f0f09SMel Gorman 		struct page **capture)
262256de7263SMel Gorman {
2623ea7ab982SMichal Hocko 	enum compact_result ret;
262456de7263SMel Gorman 	struct compact_control cc = {
262556de7263SMel Gorman 		.order = order,
2626dbe2d4e4SMel Gorman 		.search_order = order,
26276d7ce559SDavid Rientjes 		.gfp_mask = gfp_mask,
262856de7263SMel Gorman 		.zone = zone,
2629a5508cd8SVlastimil Babka 		.mode = (prio == COMPACT_PRIO_ASYNC) ?
2630a5508cd8SVlastimil Babka 					MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
2631ebff3980SVlastimil Babka 		.alloc_flags = alloc_flags,
263297a225e6SJoonsoo Kim 		.highest_zoneidx = highest_zoneidx,
2633accf6242SVlastimil Babka 		.direct_compaction = true,
2634a8e025e5SVlastimil Babka 		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
26359f7e3387SVlastimil Babka 		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
26369f7e3387SVlastimil Babka 		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
263756de7263SMel Gorman 	};
26385e1f0f09SMel Gorman 	struct capture_control capc = {
26395e1f0f09SMel Gorman 		.cc = &cc,
26405e1f0f09SMel Gorman 		.page = NULL,
26415e1f0f09SMel Gorman 	};
26425e1f0f09SMel Gorman 
2643b9e20f0dSVlastimil Babka 	/*
2644b9e20f0dSVlastimil Babka 	 * Make sure the structs are really initialized before we expose the
2645b9e20f0dSVlastimil Babka 	 * capture control, in case we are interrupted and the interrupt handler
2646b9e20f0dSVlastimil Babka 	 * frees a page.
2647b9e20f0dSVlastimil Babka 	 */
2648b9e20f0dSVlastimil Babka 	barrier();
2649b9e20f0dSVlastimil Babka 	WRITE_ONCE(current->capture_control, &capc);
265056de7263SMel Gorman 
26515e1f0f09SMel Gorman 	ret = compact_zone(&cc, &capc);
2652e64c5237SShaohua Li 
2653b9e20f0dSVlastimil Babka 	/*
2654b9e20f0dSVlastimil Babka 	 * Make sure we hide capture control first before we read the captured
2655b9e20f0dSVlastimil Babka 	 * page pointer, otherwise an interrupt could free and capture a page
2656b9e20f0dSVlastimil Babka 	 * and we would leak it.
2657b9e20f0dSVlastimil Babka 	 */
2658b9e20f0dSVlastimil Babka 	WRITE_ONCE(current->capture_control, NULL);
2659b9e20f0dSVlastimil Babka 	*capture = READ_ONCE(capc.page);
266006dac2f4SCharan Teja Reddy 	/*
266106dac2f4SCharan Teja Reddy 	 * Technically, it is also possible that compaction is skipped but
266206dac2f4SCharan Teja Reddy 	 * the page is still captured purely by luck (an IRQ came and freed the page).
266306dac2f4SCharan Teja Reddy * Returning COMPACT_SUCCESS in such cases helps in properly accounting 266406dac2f4SCharan Teja Reddy * the COMPACT[STALL|FAIL] when compaction is skipped. 266506dac2f4SCharan Teja Reddy */ 266606dac2f4SCharan Teja Reddy if (*capture) 266706dac2f4SCharan Teja Reddy ret = COMPACT_SUCCESS; 26685e1f0f09SMel Gorman 2669e64c5237SShaohua Li return ret; 267056de7263SMel Gorman } 267156de7263SMel Gorman 267256de7263SMel Gorman /** 267356de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 267456de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 26751a6d53a1SVlastimil Babka * @order: The order of the current allocation 26761a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 26771a6d53a1SVlastimil Babka * @ac: The context of current allocation 2678112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed 26796467552cSVlastimil Babka * @capture: Pointer to free page created by compaction will be stored here 268056de7263SMel Gorman * 268156de7263SMel Gorman * This is the main entry point for direct page compaction. 268256de7263SMel Gorman */ 2683ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2684c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 26855e1f0f09SMel Gorman enum compact_priority prio, struct page **capture) 268656de7263SMel Gorman { 2687fe573327SVasily Averin int may_perform_io = (__force int)(gfp_mask & __GFP_IO); 268856de7263SMel Gorman struct zoneref *z; 268956de7263SMel Gorman struct zone *zone; 26901d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 269156de7263SMel Gorman 269273e64c51SMichal Hocko /* 269373e64c51SMichal Hocko * Check if the GFP flags allow compaction - GFP_NOIO is really 269473e64c51SMichal Hocko * tricky context because the migration might require IO 269573e64c51SMichal Hocko */ 269673e64c51SMichal Hocko if (!may_perform_io) 269753853e2dSVlastimil Babka return COMPACT_SKIPPED; 269856de7263SMel Gorman 2699a5508cd8SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2700837d026dSJoonsoo Kim 270156de7263SMel Gorman /* Compact each zone in the list */ 270297a225e6SJoonsoo Kim for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 270397a225e6SJoonsoo Kim ac->highest_zoneidx, ac->nodemask) { 2704ea7ab982SMichal Hocko enum compact_result status; 270556de7263SMel Gorman 2706a8e025e5SVlastimil Babka if (prio > MIN_COMPACT_PRIORITY 2707a8e025e5SVlastimil Babka && compaction_deferred(zone, order)) { 27081d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 270953853e2dSVlastimil Babka continue; 27101d4746d3SMichal Hocko } 271153853e2dSVlastimil Babka 2712a5508cd8SVlastimil Babka status = compact_zone_order(zone, order, gfp_mask, prio, 271397a225e6SJoonsoo Kim alloc_flags, ac->highest_zoneidx, capture); 271456de7263SMel Gorman rc = max(status, rc); 271556de7263SMel Gorman 27167ceb009aSVlastimil Babka /* The allocation should succeed, stop compacting */ 27177ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 271853853e2dSVlastimil Babka /* 271953853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 272053853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 272153853e2dSVlastimil Babka * will repeat this with true if allocation indeed 272253853e2dSVlastimil Babka * succeeds in this zone. 
272353853e2dSVlastimil Babka 			 */
272453853e2dSVlastimil Babka 			compaction_defer_reset(zone, order, false);
27251f9efdefSVlastimil Babka 
2726c3486f53SVlastimil Babka 			break;
27271f9efdefSVlastimil Babka 		}
27281f9efdefSVlastimil Babka 
2729a5508cd8SVlastimil Babka 		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
2730c3486f53SVlastimil Babka 					status == COMPACT_PARTIAL_SKIPPED))
273153853e2dSVlastimil Babka 			/*
273253853e2dSVlastimil Babka 			 * We think that allocation won't succeed in this zone
273353853e2dSVlastimil Babka 			 * so we defer compaction there. If it ends up
273453853e2dSVlastimil Babka 			 * succeeding after all, it will be reset.
273553853e2dSVlastimil Babka 			 */
273653853e2dSVlastimil Babka 			defer_compaction(zone, order);
27371f9efdefSVlastimil Babka 
27381f9efdefSVlastimil Babka 		/*
27391f9efdefSVlastimil Babka 		 * We might have stopped compacting due to need_resched() in
27401f9efdefSVlastimil Babka 		 * async compaction, or due to a fatal signal detected. In that
2741c3486f53SVlastimil Babka 		 * case do not try further zones.
27421f9efdefSVlastimil Babka 		 */
2743c3486f53SVlastimil Babka 		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
2744c3486f53SVlastimil Babka 					|| fatal_signal_pending(current))
27451f9efdefSVlastimil Babka 			break;
27461f9efdefSVlastimil Babka 	}
27471f9efdefSVlastimil Babka 
274856de7263SMel Gorman 	return rc;
274956de7263SMel Gorman }
275056de7263SMel Gorman 
2751facdaa91SNitin Gupta /*
2752facdaa91SNitin Gupta  * Compact all zones within a node until each zone's fragmentation score
2753facdaa91SNitin Gupta  * falls within the proactive compaction thresholds (as determined by the
2754facdaa91SNitin Gupta  * proactiveness tunable).
2755facdaa91SNitin Gupta  *
2756facdaa91SNitin Gupta  * It is possible that the function returns before reaching score targets
2757facdaa91SNitin Gupta  * due to various back-off conditions, such as contention on per-node or
2758facdaa91SNitin Gupta  * per-zone locks.
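 *
 * The score targets follow from sysctl_compaction_proactiveness; with
 * the default value of 20 (assumed here), fragmentation_score_wmark()
 * above yields:
 *
 *	wmark_low  = max(100 - 20, 5)  = 80;	/* stop target */
 *	wmark_high = min(80 + 10, 100) = 90;	/* start threshold */
 *
 * i.e. proactive compaction kicks in once the node score exceeds 90 and
 * keeps running until it drops to 80 or a back-off condition is met.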
2759facdaa91SNitin Gupta */ 2760facdaa91SNitin Gupta static void proactive_compact_node(pg_data_t *pgdat) 2761facdaa91SNitin Gupta { 2762facdaa91SNitin Gupta int zoneid; 2763facdaa91SNitin Gupta struct zone *zone; 2764facdaa91SNitin Gupta struct compact_control cc = { 2765facdaa91SNitin Gupta .order = -1, 2766facdaa91SNitin Gupta .mode = MIGRATE_SYNC_LIGHT, 2767facdaa91SNitin Gupta .ignore_skip_hint = true, 2768facdaa91SNitin Gupta .whole_zone = true, 2769facdaa91SNitin Gupta .gfp_mask = GFP_KERNEL, 2770facdaa91SNitin Gupta .proactive_compaction = true, 2771facdaa91SNitin Gupta }; 2772facdaa91SNitin Gupta 2773facdaa91SNitin Gupta for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2774facdaa91SNitin Gupta zone = &pgdat->node_zones[zoneid]; 2775facdaa91SNitin Gupta if (!populated_zone(zone)) 2776facdaa91SNitin Gupta continue; 2777facdaa91SNitin Gupta 2778facdaa91SNitin Gupta cc.zone = zone; 2779facdaa91SNitin Gupta 2780facdaa91SNitin Gupta compact_zone(&cc, NULL); 2781facdaa91SNitin Gupta 27821bfb7684SBaolin Wang count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 27831bfb7684SBaolin Wang cc.total_migrate_scanned); 27841bfb7684SBaolin Wang count_compact_events(KCOMPACTD_FREE_SCANNED, 27851bfb7684SBaolin Wang cc.total_free_scanned); 2786facdaa91SNitin Gupta } 2787facdaa91SNitin Gupta } 278856de7263SMel Gorman 278976ab0f53SMel Gorman /* Compact all zones within a node */ 27907103f16dSAndrew Morton static void compact_node(int nid) 27917be62de9SRik van Riel { 2792791cae96SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2793791cae96SVlastimil Babka int zoneid; 2794791cae96SVlastimil Babka struct zone *zone; 27957be62de9SRik van Riel struct compact_control cc = { 27967be62de9SRik van Riel .order = -1, 2797e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 279891ca9186SDavid Rientjes .ignore_skip_hint = true, 279906ed2998SVlastimil Babka .whole_zone = true, 280073e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 28017be62de9SRik van Riel }; 28027be62de9SRik van Riel 2803791cae96SVlastimil Babka 2804791cae96SVlastimil Babka for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2805791cae96SVlastimil Babka 2806791cae96SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2807791cae96SVlastimil Babka if (!populated_zone(zone)) 2808791cae96SVlastimil Babka continue; 2809791cae96SVlastimil Babka 2810791cae96SVlastimil Babka cc.zone = zone; 2811791cae96SVlastimil Babka 28125e1f0f09SMel Gorman compact_zone(&cc, NULL); 2813791cae96SVlastimil Babka } 28147be62de9SRik van Riel } 28157be62de9SRik van Riel 281676ab0f53SMel Gorman /* Compact all nodes in the system */ 28177964c06dSJason Liu static void compact_nodes(void) 281876ab0f53SMel Gorman { 281976ab0f53SMel Gorman int nid; 282076ab0f53SMel Gorman 28218575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 28228575ec29SHugh Dickins lru_add_drain_all(); 28238575ec29SHugh Dickins 282476ab0f53SMel Gorman for_each_online_node(nid) 282576ab0f53SMel Gorman compact_node(nid); 282676ab0f53SMel Gorman } 282776ab0f53SMel Gorman 282848fe8ab8SMinghao Chi static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 282965d759c8SCharan Teja Reddy void *buffer, size_t *length, loff_t *ppos) 283065d759c8SCharan Teja Reddy { 283165d759c8SCharan Teja Reddy int rc, nid; 283265d759c8SCharan Teja Reddy 283365d759c8SCharan Teja Reddy rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 283465d759c8SCharan Teja Reddy if (rc) 283565d759c8SCharan Teja Reddy return rc; 283665d759c8SCharan Teja Reddy 283765d759c8SCharan Teja Reddy if (write && 
sysctl_compaction_proactiveness) { 283865d759c8SCharan Teja Reddy for_each_online_node(nid) { 283965d759c8SCharan Teja Reddy pg_data_t *pgdat = NODE_DATA(nid); 284065d759c8SCharan Teja Reddy 284165d759c8SCharan Teja Reddy if (pgdat->proactive_compact_trigger) 284265d759c8SCharan Teja Reddy continue; 284365d759c8SCharan Teja Reddy 284465d759c8SCharan Teja Reddy pgdat->proactive_compact_trigger = true; 28458fff8b6fSBaolin Wang trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1, 28468fff8b6fSBaolin Wang pgdat->nr_zones - 1); 284765d759c8SCharan Teja Reddy wake_up_interruptible(&pgdat->kcompactd_wait); 284865d759c8SCharan Teja Reddy } 284965d759c8SCharan Teja Reddy } 285065d759c8SCharan Teja Reddy 285165d759c8SCharan Teja Reddy return 0; 285265d759c8SCharan Teja Reddy } 285365d759c8SCharan Teja Reddy 2854facdaa91SNitin Gupta /* 2855fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 2856fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 2857fec4eb2cSYaowei Bai */ 285848fe8ab8SMinghao Chi static int sysctl_compaction_handler(struct ctl_table *table, int write, 285932927393SChristoph Hellwig void *buffer, size_t *length, loff_t *ppos) 286076ab0f53SMel Gorman { 28618b9167cdSWen Yang int ret; 28628b9167cdSWen Yang 28638b9167cdSWen Yang ret = proc_dointvec(table, write, buffer, length, ppos); 28648b9167cdSWen Yang if (ret) 28658b9167cdSWen Yang return ret; 28668b9167cdSWen Yang 28678b9167cdSWen Yang if (sysctl_compact_memory != 1) 28688b9167cdSWen Yang return -EINVAL; 28698b9167cdSWen Yang 287076ab0f53SMel Gorman if (write) 28717964c06dSJason Liu compact_nodes(); 287276ab0f53SMel Gorman 287376ab0f53SMel Gorman return 0; 287476ab0f53SMel Gorman } 2875ed4a6d7fSMel Gorman 2876ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 287717adb230SYueHaibing static ssize_t compact_store(struct device *dev, 287810fbcf4cSKay Sievers struct device_attribute *attr, 2879ed4a6d7fSMel Gorman const char *buf, size_t count) 2880ed4a6d7fSMel Gorman { 28818575ec29SHugh Dickins int nid = dev->id; 28828575ec29SHugh Dickins 28838575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 28848575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 28858575ec29SHugh Dickins lru_add_drain_all(); 28868575ec29SHugh Dickins 28878575ec29SHugh Dickins compact_node(nid); 28888575ec29SHugh Dickins } 2889ed4a6d7fSMel Gorman 2890ed4a6d7fSMel Gorman return count; 2891ed4a6d7fSMel Gorman } 289217adb230SYueHaibing static DEVICE_ATTR_WO(compact); 2893ed4a6d7fSMel Gorman 2894ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 2895ed4a6d7fSMel Gorman { 289610fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 2897ed4a6d7fSMel Gorman } 2898ed4a6d7fSMel Gorman 2899ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 2900ed4a6d7fSMel Gorman { 290110fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 2902ed4a6d7fSMel Gorman } 2903ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2904ff9543fdSMichal Nazarewicz 2905698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2906698b1b30SVlastimil Babka { 290765d759c8SCharan Teja Reddy return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || 290865d759c8SCharan Teja Reddy pgdat->proactive_compact_trigger; 2909698b1b30SVlastimil Babka } 2910698b1b30SVlastimil Babka 2911698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 2912698b1b30SVlastimil Babka { 
2913698b1b30SVlastimil Babka int zoneid; 2914698b1b30SVlastimil Babka struct zone *zone; 291597a225e6SJoonsoo Kim enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2916698b1b30SVlastimil Babka 291797a225e6SJoonsoo Kim for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2918698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2919698b1b30SVlastimil Babka 2920698b1b30SVlastimil Babka if (!populated_zone(zone)) 2921698b1b30SVlastimil Babka continue; 2922698b1b30SVlastimil Babka 2923e8606320SJohannes Weiner /* Allocation can already succeed, check other zones */ 2924e8606320SJohannes Weiner if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, 2925e8606320SJohannes Weiner min_wmark_pages(zone), 2926e8606320SJohannes Weiner highest_zoneidx, 0)) 2927e8606320SJohannes Weiner continue; 2928e8606320SJohannes Weiner 2929e8606320SJohannes Weiner if (compaction_suitable(zone, pgdat->kcompactd_max_order, 29303cf04937SJohannes Weiner highest_zoneidx)) 2931698b1b30SVlastimil Babka return true; 2932698b1b30SVlastimil Babka } 2933698b1b30SVlastimil Babka 2934698b1b30SVlastimil Babka return false; 2935698b1b30SVlastimil Babka } 2936698b1b30SVlastimil Babka 2937698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 2938698b1b30SVlastimil Babka { 2939698b1b30SVlastimil Babka /* 2940698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 2941698b1b30SVlastimil Babka * order is allocatable. 2942698b1b30SVlastimil Babka */ 2943698b1b30SVlastimil Babka int zoneid; 2944698b1b30SVlastimil Babka struct zone *zone; 2945698b1b30SVlastimil Babka struct compact_control cc = { 2946698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 2947dbe2d4e4SMel Gorman .search_order = pgdat->kcompactd_max_order, 294897a225e6SJoonsoo Kim .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2949698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 2950a0647dc9SDavid Rientjes .ignore_skip_hint = false, 295173e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 2952698b1b30SVlastimil Babka }; 2953698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 295497a225e6SJoonsoo Kim cc.highest_zoneidx); 29557f354a54SDavid Rientjes count_compact_event(KCOMPACTD_WAKE); 2956698b1b30SVlastimil Babka 295797a225e6SJoonsoo Kim for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2958698b1b30SVlastimil Babka int status; 2959698b1b30SVlastimil Babka 2960698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2961698b1b30SVlastimil Babka if (!populated_zone(zone)) 2962698b1b30SVlastimil Babka continue; 2963698b1b30SVlastimil Babka 2964698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 2965698b1b30SVlastimil Babka continue; 2966698b1b30SVlastimil Babka 2967e8606320SJohannes Weiner /* Allocation can already succeed, nothing to do */ 2968e8606320SJohannes Weiner if (zone_watermark_ok(zone, cc.order, 2969e8606320SJohannes Weiner min_wmark_pages(zone), zoneid, 0)) 2970698b1b30SVlastimil Babka continue; 2971698b1b30SVlastimil Babka 29723cf04937SJohannes Weiner if (!compaction_suitable(zone, cc.order, zoneid)) 2973e8606320SJohannes Weiner continue; 2974f98a497eSJohannes Weiner 2975172400c6SVlastimil Babka if (kthread_should_stop()) 2976172400c6SVlastimil Babka return; 2977a94b5252SYafang Shao 2978a94b5252SYafang Shao cc.zone = zone; 29795e1f0f09SMel Gorman status = compact_zone(&cc, NULL); 2980698b1b30SVlastimil Babka 29817ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 2982698b1b30SVlastimil Babka 
compaction_defer_reset(zone, cc.order, false); 2983c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2984698b1b30SVlastimil Babka /* 2985bc3106b2SDavid Rientjes * Buddy pages may become stranded on pcps that could 2986bc3106b2SDavid Rientjes * otherwise coalesce on the zone's free area for 2987bc3106b2SDavid Rientjes * order >= cc.order. This is ratelimited by the 2988bc3106b2SDavid Rientjes * upcoming deferral. 2989bc3106b2SDavid Rientjes */ 2990bc3106b2SDavid Rientjes drain_all_pages(zone); 2991bc3106b2SDavid Rientjes 2992bc3106b2SDavid Rientjes /* 2993698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 2994698b1b30SVlastimil Babka * sync direct compaction does. 2995698b1b30SVlastimil Babka */ 2996698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 2997698b1b30SVlastimil Babka } 2998698b1b30SVlastimil Babka 29997f354a54SDavid Rientjes count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 30007f354a54SDavid Rientjes cc.total_migrate_scanned); 30017f354a54SDavid Rientjes count_compact_events(KCOMPACTD_FREE_SCANNED, 30027f354a54SDavid Rientjes cc.total_free_scanned); 3003698b1b30SVlastimil Babka } 3004698b1b30SVlastimil Babka 3005698b1b30SVlastimil Babka /* 3006698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. But remember 300797a225e6SJoonsoo Kim * the requested order/highest_zoneidx in case it was higher/tighter 300897a225e6SJoonsoo Kim * than our current ones 3009698b1b30SVlastimil Babka */ 3010698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 3011698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 301297a225e6SJoonsoo Kim if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 301397a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 3014698b1b30SVlastimil Babka } 3015698b1b30SVlastimil Babka 301697a225e6SJoonsoo Kim void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 3017698b1b30SVlastimil Babka { 3018698b1b30SVlastimil Babka if (!order) 3019698b1b30SVlastimil Babka return; 3020698b1b30SVlastimil Babka 3021698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 3022698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 3023698b1b30SVlastimil Babka 302497a225e6SJoonsoo Kim if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 302597a225e6SJoonsoo Kim pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 3026698b1b30SVlastimil Babka 30276818600fSDavidlohr Bueso /* 30286818600fSDavidlohr Bueso * Pairs with implicit barrier in wait_event_freezable() 30296818600fSDavidlohr Bueso * such that wakeups are not missed. 30306818600fSDavidlohr Bueso */ 30316818600fSDavidlohr Bueso if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 3032698b1b30SVlastimil Babka return; 3033698b1b30SVlastimil Babka 3034698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 3035698b1b30SVlastimil Babka return; 3036698b1b30SVlastimil Babka 3037698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 303897a225e6SJoonsoo Kim highest_zoneidx); 3039698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 3040698b1b30SVlastimil Babka } 3041698b1b30SVlastimil Babka 3042698b1b30SVlastimil Babka /* 3043698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 3044698b1b30SVlastimil Babka * from the init process. 
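 *
 * Its main loop alternates between directed work (a wakeup from
 * wakeup_kcompactd()) and a proactive-compaction timeout; condensed
 * (a sketch of kcompactd() below, not a verbatim copy):
 *
 *	while (!kthread_should_stop()) {
 *		if (wait_event_freezable_timeout(pgdat->kcompactd_wait, ...))
 *			kcompactd_do_work(pgdat);	/* woken: directed work */
 *		else if (should_proactive_compact_node(pgdat))
 *			proactive_compact_node(pgdat);	/* timed out */
 *	}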
3045698b1b30SVlastimil Babka  */
3046698b1b30SVlastimil Babka static int kcompactd(void *p)
3047698b1b30SVlastimil Babka {
3048698b1b30SVlastimil Babka 	pg_data_t *pgdat = (pg_data_t *)p;
3049698b1b30SVlastimil Babka 	struct task_struct *tsk = current;
3050e1e92bfaSCharan Teja Reddy 	long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC);
3051e1e92bfaSCharan Teja Reddy 	long timeout = default_timeout;
3052698b1b30SVlastimil Babka 
3053698b1b30SVlastimil Babka 	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
3054698b1b30SVlastimil Babka 
3055698b1b30SVlastimil Babka 	if (!cpumask_empty(cpumask))
3056698b1b30SVlastimil Babka 		set_cpus_allowed_ptr(tsk, cpumask);
3057698b1b30SVlastimil Babka 
3058698b1b30SVlastimil Babka 	set_freezable();
3059698b1b30SVlastimil Babka 
3060698b1b30SVlastimil Babka 	pgdat->kcompactd_max_order = 0;
306197a225e6SJoonsoo Kim 	pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1;
3062698b1b30SVlastimil Babka 
3063698b1b30SVlastimil Babka 	while (!kthread_should_stop()) {
3064eb414681SJohannes Weiner 		unsigned long pflags;
3065eb414681SJohannes Weiner 
306665d759c8SCharan Teja Reddy 		/*
306765d759c8SCharan Teja Reddy 		 * Avoid the unnecessary wakeup for proactive compaction
306865d759c8SCharan Teja Reddy 		 * when it is disabled.
306965d759c8SCharan Teja Reddy 		 */
307065d759c8SCharan Teja Reddy 		if (!sysctl_compaction_proactiveness)
307165d759c8SCharan Teja Reddy 			timeout = MAX_SCHEDULE_TIMEOUT;
3072698b1b30SVlastimil Babka 		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
3073facdaa91SNitin Gupta 		if (wait_event_freezable_timeout(pgdat->kcompactd_wait,
307465d759c8SCharan Teja Reddy 			kcompactd_work_requested(pgdat), timeout) &&
307565d759c8SCharan Teja Reddy 			!pgdat->proactive_compact_trigger) {
3076698b1b30SVlastimil Babka 
3077eb414681SJohannes Weiner 			psi_memstall_enter(&pflags);
3078698b1b30SVlastimil Babka 			kcompactd_do_work(pgdat);
3079eb414681SJohannes Weiner 			psi_memstall_leave(&pflags);
3080e1e92bfaSCharan Teja Reddy 			/*
3081e1e92bfaSCharan Teja Reddy 			 * Reset the timeout value. The defer timeout from
3082e1e92bfaSCharan Teja Reddy 			 * proactive compaction is lost here, but that is fine:
3083e1e92bfaSCharan Teja Reddy 			 * if the zone's condition has changed substantially,
3084e1e92bfaSCharan Teja Reddy 			 * carrying on with the previous defer interval is
3085e1e92bfaSCharan Teja Reddy 			 * not useful.
3086e1e92bfaSCharan Teja Reddy 			 */
3087e1e92bfaSCharan Teja Reddy 			timeout = default_timeout;
3088facdaa91SNitin Gupta 			continue;
3089facdaa91SNitin Gupta 		}
3090facdaa91SNitin Gupta 
3091e1e92bfaSCharan Teja Reddy 		/*
3092e1e92bfaSCharan Teja Reddy 		 * Start the proactive work with default timeout. Based
3093e1e92bfaSCharan Teja Reddy 		 * on the fragmentation score, this timeout is updated.
3094e1e92bfaSCharan Teja Reddy 		 */
3095e1e92bfaSCharan Teja Reddy 		timeout = default_timeout;
3096facdaa91SNitin Gupta 		if (should_proactive_compact_node(pgdat)) {
3097facdaa91SNitin Gupta 			unsigned int prev_score, score;
3098facdaa91SNitin Gupta 
3099facdaa91SNitin Gupta 			prev_score = fragmentation_score_node(pgdat);
3100facdaa91SNitin Gupta 			proactive_compact_node(pgdat);
3101facdaa91SNitin Gupta 			score = fragmentation_score_node(pgdat);
3102facdaa91SNitin Gupta 			/*
3103facdaa91SNitin Gupta 			 * Defer proactive compaction if the fragmentation
3104facdaa91SNitin Gupta 			 * score did not go down, i.e. no progress was made.
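 *
 * With HPAGE_FRAG_CHECK_INTERVAL_MSEC at 500 and COMPACT_MAX_DEFER_SHIFT
 * at 6 (the current values, assumed here), the back-off below stretches
 * the next proactive check from 500ms out to:
 *
 *	500ms << 6 == 32000ms == 32s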
3105facdaa91SNitin Gupta 			 */
3106e1e92bfaSCharan Teja Reddy 			if (unlikely(score >= prev_score))
3107e1e92bfaSCharan Teja Reddy 				timeout =
3108e1e92bfaSCharan Teja Reddy 				   default_timeout << COMPACT_MAX_DEFER_SHIFT;
3109facdaa91SNitin Gupta 		}
311065d759c8SCharan Teja Reddy 		if (unlikely(pgdat->proactive_compact_trigger))
311165d759c8SCharan Teja Reddy 			pgdat->proactive_compact_trigger = false;
3112698b1b30SVlastimil Babka 	}
3113698b1b30SVlastimil Babka 
3114698b1b30SVlastimil Babka 	return 0;
3115698b1b30SVlastimil Babka }
3116698b1b30SVlastimil Babka 
3117698b1b30SVlastimil Babka /*
3118698b1b30SVlastimil Babka  * This kcompactd start function will be called by init and node-hot-add.
3119698b1b30SVlastimil Babka  * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
3120698b1b30SVlastimil Babka  */
3121833dfc00SMiaohe Lin void __meminit kcompactd_run(int nid)
3122698b1b30SVlastimil Babka {
3123698b1b30SVlastimil Babka 	pg_data_t *pgdat = NODE_DATA(nid);
3124698b1b30SVlastimil Babka 
3125698b1b30SVlastimil Babka 	if (pgdat->kcompactd)
3126024c61eaSMiaohe Lin 		return;
3127698b1b30SVlastimil Babka 
3128698b1b30SVlastimil Babka 	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
3129698b1b30SVlastimil Babka 	if (IS_ERR(pgdat->kcompactd)) {
3130698b1b30SVlastimil Babka 		pr_err("Failed to start kcompactd on node %d\n", nid);
3131698b1b30SVlastimil Babka 		pgdat->kcompactd = NULL;
3132698b1b30SVlastimil Babka 	}
3133698b1b30SVlastimil Babka }
3134698b1b30SVlastimil Babka 
3135698b1b30SVlastimil Babka /*
3136698b1b30SVlastimil Babka  * Called by memory hotplug when all memory in a node is offlined. Caller must
3137e8da368aSYun-Ze Li  * be holding mem_hotplug_begin/done().
3138698b1b30SVlastimil Babka  */
3139833dfc00SMiaohe Lin void __meminit kcompactd_stop(int nid)
3140698b1b30SVlastimil Babka {
3141698b1b30SVlastimil Babka 	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
3142698b1b30SVlastimil Babka 
3143698b1b30SVlastimil Babka 	if (kcompactd) {
3144698b1b30SVlastimil Babka 		kthread_stop(kcompactd);
3145698b1b30SVlastimil Babka 		NODE_DATA(nid)->kcompactd = NULL;
3146698b1b30SVlastimil Babka 	}
3147698b1b30SVlastimil Babka }
3148698b1b30SVlastimil Babka 
3149698b1b30SVlastimil Babka /*
3150698b1b30SVlastimil Babka  * It's optimal to keep kcompactd on the same CPUs as their memory, but
3151698b1b30SVlastimil Babka  * not required for correctness. So if the last cpu in a node goes
3152698b1b30SVlastimil Babka  * away, we get changed to run anywhere: as the first one comes back,
3153698b1b30SVlastimil Babka  * restore their cpu bindings.
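 *
 * The "one of our CPUs online" test below is a mask intersection; a
 * minimal userspace model (illustrative only):
 *
 *	/* true iff some CPU is both online and local to the node */
 *	static bool node_has_online_cpu(unsigned long online_mask,
 *					unsigned long node_mask)
 *	{
 *		return (online_mask & node_mask) != 0;
 *	}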
3154698b1b30SVlastimil Babka */ 3155e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu) 3156698b1b30SVlastimil Babka { 3157698b1b30SVlastimil Babka int nid; 3158698b1b30SVlastimil Babka 3159698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 3160698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 3161698b1b30SVlastimil Babka const struct cpumask *mask; 3162698b1b30SVlastimil Babka 3163698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 3164698b1b30SVlastimil Babka 3165698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3166698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 31673109de30SMiaohe Lin if (pgdat->kcompactd) 3168698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 3169698b1b30SVlastimil Babka } 3170e46b1db2SAnna-Maria Gleixner return 0; 3171698b1b30SVlastimil Babka } 3172698b1b30SVlastimil Babka 317348fe8ab8SMinghao Chi static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, 317448fe8ab8SMinghao Chi int write, void *buffer, size_t *lenp, loff_t *ppos) 317548fe8ab8SMinghao Chi { 317648fe8ab8SMinghao Chi int ret, old; 317748fe8ab8SMinghao Chi 317848fe8ab8SMinghao Chi if (!IS_ENABLED(CONFIG_PREEMPT_RT) || !write) 317948fe8ab8SMinghao Chi return proc_dointvec_minmax(table, write, buffer, lenp, ppos); 318048fe8ab8SMinghao Chi 318148fe8ab8SMinghao Chi old = *(int *)table->data; 318248fe8ab8SMinghao Chi ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); 318348fe8ab8SMinghao Chi if (ret) 318448fe8ab8SMinghao Chi return ret; 318548fe8ab8SMinghao Chi if (old != *(int *)table->data) 318648fe8ab8SMinghao Chi pr_warn_once("sysctl attribute %s changed by %s[%d]\n", 318748fe8ab8SMinghao Chi table->procname, current->comm, 318848fe8ab8SMinghao Chi task_pid_nr(current)); 318948fe8ab8SMinghao Chi return ret; 319048fe8ab8SMinghao Chi } 319148fe8ab8SMinghao Chi 319248fe8ab8SMinghao Chi static struct ctl_table vm_compaction[] = { 319348fe8ab8SMinghao Chi { 319448fe8ab8SMinghao Chi .procname = "compact_memory", 31958b9167cdSWen Yang .data = &sysctl_compact_memory, 319648fe8ab8SMinghao Chi .maxlen = sizeof(int), 319748fe8ab8SMinghao Chi .mode = 0200, 319848fe8ab8SMinghao Chi .proc_handler = sysctl_compaction_handler, 319948fe8ab8SMinghao Chi }, 320048fe8ab8SMinghao Chi { 320148fe8ab8SMinghao Chi .procname = "compaction_proactiveness", 320248fe8ab8SMinghao Chi .data = &sysctl_compaction_proactiveness, 320348fe8ab8SMinghao Chi .maxlen = sizeof(sysctl_compaction_proactiveness), 320448fe8ab8SMinghao Chi .mode = 0644, 320548fe8ab8SMinghao Chi .proc_handler = compaction_proactiveness_sysctl_handler, 320648fe8ab8SMinghao Chi .extra1 = SYSCTL_ZERO, 320748fe8ab8SMinghao Chi .extra2 = SYSCTL_ONE_HUNDRED, 320848fe8ab8SMinghao Chi }, 320948fe8ab8SMinghao Chi { 321048fe8ab8SMinghao Chi .procname = "extfrag_threshold", 321148fe8ab8SMinghao Chi .data = &sysctl_extfrag_threshold, 321248fe8ab8SMinghao Chi .maxlen = sizeof(int), 321348fe8ab8SMinghao Chi .mode = 0644, 321448fe8ab8SMinghao Chi .proc_handler = proc_dointvec_minmax, 321548fe8ab8SMinghao Chi .extra1 = SYSCTL_ZERO, 321648fe8ab8SMinghao Chi .extra2 = SYSCTL_ONE_THOUSAND, 321748fe8ab8SMinghao Chi }, 321848fe8ab8SMinghao Chi { 321948fe8ab8SMinghao Chi .procname = "compact_unevictable_allowed", 322048fe8ab8SMinghao Chi .data = &sysctl_compact_unevictable_allowed, 322148fe8ab8SMinghao Chi .maxlen = sizeof(int), 322248fe8ab8SMinghao Chi .mode = 0644, 322348fe8ab8SMinghao Chi .proc_handler = 
proc_dointvec_minmax_warn_RT_change, 322448fe8ab8SMinghao Chi .extra1 = SYSCTL_ZERO, 322548fe8ab8SMinghao Chi .extra2 = SYSCTL_ONE, 322648fe8ab8SMinghao Chi }, 322748fe8ab8SMinghao Chi { } 322848fe8ab8SMinghao Chi }; 322948fe8ab8SMinghao Chi 3230698b1b30SVlastimil Babka static int __init kcompactd_init(void) 3231698b1b30SVlastimil Babka { 3232698b1b30SVlastimil Babka int nid; 3233e46b1db2SAnna-Maria Gleixner int ret; 3234e46b1db2SAnna-Maria Gleixner 3235e46b1db2SAnna-Maria Gleixner ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 3236e46b1db2SAnna-Maria Gleixner "mm/compaction:online", 3237e46b1db2SAnna-Maria Gleixner kcompactd_cpu_online, NULL); 3238e46b1db2SAnna-Maria Gleixner if (ret < 0) { 3239e46b1db2SAnna-Maria Gleixner pr_err("kcompactd: failed to register hotplug callbacks.\n"); 3240e46b1db2SAnna-Maria Gleixner return ret; 3241e46b1db2SAnna-Maria Gleixner } 3242698b1b30SVlastimil Babka 3243698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 3244698b1b30SVlastimil Babka kcompactd_run(nid); 324548fe8ab8SMinghao Chi register_sysctl_init("vm", vm_compaction); 3246698b1b30SVlastimil Babka return 0; 3247698b1b30SVlastimil Babka } 3248698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 3249698b1b30SVlastimil Babka 3250ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 3251