1b2441318SGreg Kroah-Hartman // SPDX-License-Identifier: GPL-2.0 2748446bbSMel Gorman /* 3748446bbSMel Gorman * linux/mm/compaction.c 4748446bbSMel Gorman * 5748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 6748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 7748446bbSMel Gorman * lifting 8748446bbSMel Gorman * 9748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 10748446bbSMel Gorman */ 11698b1b30SVlastimil Babka #include <linux/cpu.h> 12748446bbSMel Gorman #include <linux/swap.h> 13748446bbSMel Gorman #include <linux/migrate.h> 14748446bbSMel Gorman #include <linux/compaction.h> 15748446bbSMel Gorman #include <linux/mm_inline.h> 16174cd4b1SIngo Molnar #include <linux/sched/signal.h> 17748446bbSMel Gorman #include <linux/backing-dev.h> 1876ab0f53SMel Gorman #include <linux/sysctl.h> 19ed4a6d7fSMel Gorman #include <linux/sysfs.h> 20194159fbSMinchan Kim #include <linux/page-isolation.h> 21b8c73fc2SAndrey Ryabinin #include <linux/kasan.h> 22698b1b30SVlastimil Babka #include <linux/kthread.h> 23698b1b30SVlastimil Babka #include <linux/freezer.h> 2483358eceSJoonsoo Kim #include <linux/page_owner.h> 25eb414681SJohannes Weiner #include <linux/psi.h> 26748446bbSMel Gorman #include "internal.h" 27748446bbSMel Gorman 28010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 29010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 30010fc29aSMinchan Kim { 31010fc29aSMinchan Kim count_vm_event(item); 32010fc29aSMinchan Kim } 33010fc29aSMinchan Kim 34010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 35010fc29aSMinchan Kim { 36010fc29aSMinchan Kim count_vm_events(item, delta); 37010fc29aSMinchan Kim } 38010fc29aSMinchan Kim #else 39010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 40010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 41010fc29aSMinchan Kim #endif 42010fc29aSMinchan Kim 43ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 44ff9543fdSMichal Nazarewicz 45b7aba698SMel Gorman #define CREATE_TRACE_POINTS 46b7aba698SMel Gorman #include <trace/events/compaction.h> 47b7aba698SMel Gorman 4806b6640aSVlastimil Babka #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) 4906b6640aSVlastimil Babka #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) 5006b6640aSVlastimil Babka #define pageblock_start_pfn(pfn) block_start_pfn(pfn, pageblock_order) 5106b6640aSVlastimil Babka #define pageblock_end_pfn(pfn) block_end_pfn(pfn, pageblock_order) 5206b6640aSVlastimil Babka 53748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 54748446bbSMel Gorman { 55748446bbSMel Gorman struct page *page, *next; 566bace090SVlastimil Babka unsigned long high_pfn = 0; 57748446bbSMel Gorman 58748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 596bace090SVlastimil Babka unsigned long pfn = page_to_pfn(page); 60748446bbSMel Gorman list_del(&page->lru); 61748446bbSMel Gorman __free_page(page); 626bace090SVlastimil Babka if (pfn > high_pfn) 636bace090SVlastimil Babka high_pfn = pfn; 64748446bbSMel Gorman } 65748446bbSMel Gorman 666bace090SVlastimil Babka return high_pfn; 67748446bbSMel Gorman } 68748446bbSMel Gorman 694469ab98SMel Gorman static void split_map_pages(struct list_head *list) 70ff9543fdSMichal Nazarewicz { 7166c64223SJoonsoo Kim unsigned int i, order, nr_pages; 7266c64223SJoonsoo 
Kim struct page *page, *next; 7366c64223SJoonsoo Kim LIST_HEAD(tmp_list); 74ff9543fdSMichal Nazarewicz 7566c64223SJoonsoo Kim list_for_each_entry_safe(page, next, list, lru) { 7666c64223SJoonsoo Kim list_del(&page->lru); 7766c64223SJoonsoo Kim 7866c64223SJoonsoo Kim order = page_private(page); 7966c64223SJoonsoo Kim nr_pages = 1 << order; 8066c64223SJoonsoo Kim 8146f24fd8SJoonsoo Kim post_alloc_hook(page, order, __GFP_MOVABLE); 8266c64223SJoonsoo Kim if (order) 8366c64223SJoonsoo Kim split_page(page, order); 8466c64223SJoonsoo Kim 8566c64223SJoonsoo Kim for (i = 0; i < nr_pages; i++) { 8666c64223SJoonsoo Kim list_add(&page->lru, &tmp_list); 8766c64223SJoonsoo Kim page++; 88ff9543fdSMichal Nazarewicz } 89ff9543fdSMichal Nazarewicz } 90ff9543fdSMichal Nazarewicz 9166c64223SJoonsoo Kim list_splice(&tmp_list, list); 9266c64223SJoonsoo Kim } 9366c64223SJoonsoo Kim 94bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION 9524e2716fSJoonsoo Kim 96bda807d4SMinchan Kim int PageMovable(struct page *page) 97bda807d4SMinchan Kim { 98bda807d4SMinchan Kim struct address_space *mapping; 99bda807d4SMinchan Kim 100bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 101bda807d4SMinchan Kim if (!__PageMovable(page)) 102bda807d4SMinchan Kim return 0; 103bda807d4SMinchan Kim 104bda807d4SMinchan Kim mapping = page_mapping(page); 105bda807d4SMinchan Kim if (mapping && mapping->a_ops && mapping->a_ops->isolate_page) 106bda807d4SMinchan Kim return 1; 107bda807d4SMinchan Kim 108bda807d4SMinchan Kim return 0; 109bda807d4SMinchan Kim } 110bda807d4SMinchan Kim EXPORT_SYMBOL(PageMovable); 111bda807d4SMinchan Kim 112bda807d4SMinchan Kim void __SetPageMovable(struct page *page, struct address_space *mapping) 113bda807d4SMinchan Kim { 114bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 115bda807d4SMinchan Kim VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page); 116bda807d4SMinchan Kim page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE); 117bda807d4SMinchan Kim } 118bda807d4SMinchan Kim EXPORT_SYMBOL(__SetPageMovable); 119bda807d4SMinchan Kim 120bda807d4SMinchan Kim void __ClearPageMovable(struct page *page) 121bda807d4SMinchan Kim { 122bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageLocked(page), page); 123bda807d4SMinchan Kim VM_BUG_ON_PAGE(!PageMovable(page), page); 124bda807d4SMinchan Kim /* 125bda807d4SMinchan Kim * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE 126bda807d4SMinchan Kim * flag so that VM can catch up released page by driver after isolation. 127bda807d4SMinchan Kim * With it, VM migration doesn't try to put it back. 128bda807d4SMinchan Kim */ 129bda807d4SMinchan Kim page->mapping = (void *)((unsigned long)page->mapping & 130bda807d4SMinchan Kim PAGE_MAPPING_MOVABLE); 131bda807d4SMinchan Kim } 132bda807d4SMinchan Kim EXPORT_SYMBOL(__ClearPageMovable); 133bda807d4SMinchan Kim 13424e2716fSJoonsoo Kim /* Do not skip compaction more than 64 times */ 13524e2716fSJoonsoo Kim #define COMPACT_MAX_DEFER_SHIFT 6 13624e2716fSJoonsoo Kim 13724e2716fSJoonsoo Kim /* 13824e2716fSJoonsoo Kim * Compaction is deferred when compaction fails to result in a page 13924e2716fSJoonsoo Kim * allocation success. 
1 << compact_defer_limit compactions are skipped up 14024e2716fSJoonsoo Kim * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT 14124e2716fSJoonsoo Kim */ 14224e2716fSJoonsoo Kim void defer_compaction(struct zone *zone, int order) 14324e2716fSJoonsoo Kim { 14424e2716fSJoonsoo Kim zone->compact_considered = 0; 14524e2716fSJoonsoo Kim zone->compact_defer_shift++; 14624e2716fSJoonsoo Kim 14724e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 14824e2716fSJoonsoo Kim zone->compact_order_failed = order; 14924e2716fSJoonsoo Kim 15024e2716fSJoonsoo Kim if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) 15124e2716fSJoonsoo Kim zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; 15224e2716fSJoonsoo Kim 15324e2716fSJoonsoo Kim trace_mm_compaction_defer_compaction(zone, order); 15424e2716fSJoonsoo Kim } 15524e2716fSJoonsoo Kim 15624e2716fSJoonsoo Kim /* Returns true if compaction should be skipped this time */ 15724e2716fSJoonsoo Kim bool compaction_deferred(struct zone *zone, int order) 15824e2716fSJoonsoo Kim { 15924e2716fSJoonsoo Kim unsigned long defer_limit = 1UL << zone->compact_defer_shift; 16024e2716fSJoonsoo Kim 16124e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 16224e2716fSJoonsoo Kim return false; 16324e2716fSJoonsoo Kim 16424e2716fSJoonsoo Kim /* Avoid possible overflow */ 16524e2716fSJoonsoo Kim if (++zone->compact_considered > defer_limit) 16624e2716fSJoonsoo Kim zone->compact_considered = defer_limit; 16724e2716fSJoonsoo Kim 16824e2716fSJoonsoo Kim if (zone->compact_considered >= defer_limit) 16924e2716fSJoonsoo Kim return false; 17024e2716fSJoonsoo Kim 17124e2716fSJoonsoo Kim trace_mm_compaction_deferred(zone, order); 17224e2716fSJoonsoo Kim 17324e2716fSJoonsoo Kim return true; 17424e2716fSJoonsoo Kim } 17524e2716fSJoonsoo Kim 17624e2716fSJoonsoo Kim /* 17724e2716fSJoonsoo Kim * Update defer tracking counters after successful compaction of given order, 17824e2716fSJoonsoo Kim * which means an allocation either succeeded (alloc_success == true) or is 17924e2716fSJoonsoo Kim * expected to succeed. 18024e2716fSJoonsoo Kim */ 18124e2716fSJoonsoo Kim void compaction_defer_reset(struct zone *zone, int order, 18224e2716fSJoonsoo Kim bool alloc_success) 18324e2716fSJoonsoo Kim { 18424e2716fSJoonsoo Kim if (alloc_success) { 18524e2716fSJoonsoo Kim zone->compact_considered = 0; 18624e2716fSJoonsoo Kim zone->compact_defer_shift = 0; 18724e2716fSJoonsoo Kim } 18824e2716fSJoonsoo Kim if (order >= zone->compact_order_failed) 18924e2716fSJoonsoo Kim zone->compact_order_failed = order + 1; 19024e2716fSJoonsoo Kim 19124e2716fSJoonsoo Kim trace_mm_compaction_defer_reset(zone, order); 19224e2716fSJoonsoo Kim } 19324e2716fSJoonsoo Kim 19424e2716fSJoonsoo Kim /* Returns true if restarting compaction after many failures */ 19524e2716fSJoonsoo Kim bool compaction_restarting(struct zone *zone, int order) 19624e2716fSJoonsoo Kim { 19724e2716fSJoonsoo Kim if (order < zone->compact_order_failed) 19824e2716fSJoonsoo Kim return false; 19924e2716fSJoonsoo Kim 20024e2716fSJoonsoo Kim return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && 20124e2716fSJoonsoo Kim zone->compact_considered >= 1UL << zone->compact_defer_shift; 20224e2716fSJoonsoo Kim } 20324e2716fSJoonsoo Kim 204bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate. 
*/ 205bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 206bb13ffebSMel Gorman struct page *page) 207bb13ffebSMel Gorman { 208bb13ffebSMel Gorman if (cc->ignore_skip_hint) 209bb13ffebSMel Gorman return true; 210bb13ffebSMel Gorman 211bb13ffebSMel Gorman return !get_pageblock_skip(page); 212bb13ffebSMel Gorman } 213bb13ffebSMel Gorman 21402333641SVlastimil Babka static void reset_cached_positions(struct zone *zone) 21502333641SVlastimil Babka { 21602333641SVlastimil Babka zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 21702333641SVlastimil Babka zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 218623446e4SJoonsoo Kim zone->compact_cached_free_pfn = 21906b6640aSVlastimil Babka pageblock_start_pfn(zone_end_pfn(zone) - 1); 22002333641SVlastimil Babka } 22102333641SVlastimil Babka 222bb13ffebSMel Gorman /* 223b527cfe5SVlastimil Babka * Compound pages of >= pageblock_order should consistently be skipped until 224b527cfe5SVlastimil Babka * released. It is always pointless to compact pages of such order (if they are 225b527cfe5SVlastimil Babka * migratable), and the pageblocks they occupy cannot contain any free pages. 22621dc7e02SDavid Rientjes */ 227b527cfe5SVlastimil Babka static bool pageblock_skip_persistent(struct page *page) 22821dc7e02SDavid Rientjes { 229b527cfe5SVlastimil Babka if (!PageCompound(page)) 23021dc7e02SDavid Rientjes return false; 231b527cfe5SVlastimil Babka 232b527cfe5SVlastimil Babka page = compound_head(page); 233b527cfe5SVlastimil Babka 234b527cfe5SVlastimil Babka if (compound_order(page) >= pageblock_order) 23521dc7e02SDavid Rientjes return true; 236b527cfe5SVlastimil Babka 237b527cfe5SVlastimil Babka return false; 23821dc7e02SDavid Rientjes } 23921dc7e02SDavid Rientjes 24021dc7e02SDavid Rientjes /* 241bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 242bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 243bb13ffebSMel Gorman * meet. 
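 *
 * Illustrative call pattern (a sketch of the existing caller below, not a
 * new interface): this walk is normally reached via reset_isolation_suitable(),
 * which only flushes zones that recently finished a full compaction cycle:
 *
 *	if (zone->compact_blockskip_flush)
 *		__reset_isolation_suitable(zone);
 *
 * Every pageblock then has its skip bit cleared and the cached scanner
 * positions are rewound by reset_cached_positions().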
244bb13ffebSMel Gorman */ 24562997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 246bb13ffebSMel Gorman { 247bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 248108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 249bb13ffebSMel Gorman unsigned long pfn; 250bb13ffebSMel Gorman 25162997027SMel Gorman zone->compact_blockskip_flush = false; 252bb13ffebSMel Gorman 253bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 254bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 255bb13ffebSMel Gorman struct page *page; 256bb13ffebSMel Gorman 257bb13ffebSMel Gorman cond_resched(); 258bb13ffebSMel Gorman 259ccbe1e4dSMichal Hocko page = pfn_to_online_page(pfn); 260ccbe1e4dSMichal Hocko if (!page) 261bb13ffebSMel Gorman continue; 262bb13ffebSMel Gorman if (zone != page_zone(page)) 263bb13ffebSMel Gorman continue; 264b527cfe5SVlastimil Babka if (pageblock_skip_persistent(page)) 26521dc7e02SDavid Rientjes continue; 266bb13ffebSMel Gorman 267bb13ffebSMel Gorman clear_pageblock_skip(page); 268bb13ffebSMel Gorman } 26902333641SVlastimil Babka 27002333641SVlastimil Babka reset_cached_positions(zone); 271bb13ffebSMel Gorman } 272bb13ffebSMel Gorman 27362997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 27462997027SMel Gorman { 27562997027SMel Gorman int zoneid; 27662997027SMel Gorman 27762997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 27862997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 27962997027SMel Gorman if (!populated_zone(zone)) 28062997027SMel Gorman continue; 28162997027SMel Gorman 28262997027SMel Gorman /* Only flush if a full compaction finished recently */ 28362997027SMel Gorman if (zone->compact_blockskip_flush) 28462997027SMel Gorman __reset_isolation_suitable(zone); 28562997027SMel Gorman } 28662997027SMel Gorman } 28762997027SMel Gorman 288bb13ffebSMel Gorman /* 289e380bebeSMel Gorman * Sets the pageblock skip bit if it was clear. Note that this is a hint as 290e380bebeSMel Gorman * locks are not required for read/writers. Returns true if it was already set. 
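 *
 * Illustrative caller pattern (a sketch of how the migrate scanner below
 * uses it, not a new interface):
 *
 *	if (!skip_updated) {
 *		skip_updated = true;
 *		if (test_and_set_skip(cc, page, low_pfn))
 *			goto isolate_abort;
 *	}
 *
 * A true return means the skip bit was already set, so the caller treats
 * the pageblock as claimed and aborts instead of rescanning it.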
291e380bebeSMel Gorman */ 292e380bebeSMel Gorman static bool test_and_set_skip(struct compact_control *cc, struct page *page, 293e380bebeSMel Gorman unsigned long pfn) 294e380bebeSMel Gorman { 295e380bebeSMel Gorman bool skip; 296e380bebeSMel Gorman 297e380bebeSMel Gorman /* Do not update if skip hint is being ignored */ 298e380bebeSMel Gorman if (cc->ignore_skip_hint) 299e380bebeSMel Gorman return false; 300e380bebeSMel Gorman 301e380bebeSMel Gorman if (!IS_ALIGNED(pfn, pageblock_nr_pages)) 302e380bebeSMel Gorman return false; 303e380bebeSMel Gorman 304e380bebeSMel Gorman skip = get_pageblock_skip(page); 305e380bebeSMel Gorman if (!skip && !cc->no_set_skip_hint) 306e380bebeSMel Gorman set_pageblock_skip(page); 307e380bebeSMel Gorman 308e380bebeSMel Gorman return skip; 309e380bebeSMel Gorman } 310e380bebeSMel Gorman 311e380bebeSMel Gorman static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) 312e380bebeSMel Gorman { 313e380bebeSMel Gorman struct zone *zone = cc->zone; 314e380bebeSMel Gorman 315e380bebeSMel Gorman pfn = pageblock_end_pfn(pfn); 316e380bebeSMel Gorman 317e380bebeSMel Gorman /* Set for isolation rather than compaction */ 318e380bebeSMel Gorman if (cc->no_set_skip_hint) 319e380bebeSMel Gorman return; 320e380bebeSMel Gorman 321e380bebeSMel Gorman if (pfn > zone->compact_cached_migrate_pfn[0]) 322e380bebeSMel Gorman zone->compact_cached_migrate_pfn[0] = pfn; 323e380bebeSMel Gorman if (cc->mode != MIGRATE_ASYNC && 324e380bebeSMel Gorman pfn > zone->compact_cached_migrate_pfn[1]) 325e380bebeSMel Gorman zone->compact_cached_migrate_pfn[1] = pfn; 326e380bebeSMel Gorman } 327e380bebeSMel Gorman 328e380bebeSMel Gorman /* 329bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 33062997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 
331bb13ffebSMel Gorman */ 332c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 333e380bebeSMel Gorman struct page *page, unsigned long nr_isolated) 334bb13ffebSMel Gorman { 335c89511abSMel Gorman struct zone *zone = cc->zone; 33635979ef3SDavid Rientjes unsigned long pfn; 3376815bf3fSJoonsoo Kim 3382583d671SVlastimil Babka if (cc->no_set_skip_hint) 3396815bf3fSJoonsoo Kim return; 3406815bf3fSJoonsoo Kim 341bb13ffebSMel Gorman if (!page) 342bb13ffebSMel Gorman return; 343bb13ffebSMel Gorman 34435979ef3SDavid Rientjes if (nr_isolated) 34535979ef3SDavid Rientjes return; 34635979ef3SDavid Rientjes 347bb13ffebSMel Gorman set_pageblock_skip(page); 348c89511abSMel Gorman 34935979ef3SDavid Rientjes pfn = page_to_pfn(page); 35035979ef3SDavid Rientjes 35135979ef3SDavid Rientjes /* Update where async and sync compaction should restart */ 35235979ef3SDavid Rientjes if (pfn < zone->compact_cached_free_pfn) 353c89511abSMel Gorman zone->compact_cached_free_pfn = pfn; 354c89511abSMel Gorman } 355bb13ffebSMel Gorman #else 356bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 357bb13ffebSMel Gorman struct page *page) 358bb13ffebSMel Gorman { 359bb13ffebSMel Gorman return true; 360bb13ffebSMel Gorman } 361bb13ffebSMel Gorman 362b527cfe5SVlastimil Babka static inline bool pageblock_skip_persistent(struct page *page) 36321dc7e02SDavid Rientjes { 36421dc7e02SDavid Rientjes return false; 36521dc7e02SDavid Rientjes } 36621dc7e02SDavid Rientjes 36721dc7e02SDavid Rientjes static inline void update_pageblock_skip(struct compact_control *cc, 368e380bebeSMel Gorman struct page *page, unsigned long nr_isolated) 369bb13ffebSMel Gorman { 370bb13ffebSMel Gorman } 371e380bebeSMel Gorman 372e380bebeSMel Gorman static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) 373e380bebeSMel Gorman { 374e380bebeSMel Gorman } 375e380bebeSMel Gorman 376e380bebeSMel Gorman static bool test_and_set_skip(struct compact_control *cc, struct page *page, 377e380bebeSMel Gorman unsigned long pfn) 378e380bebeSMel Gorman { 379e380bebeSMel Gorman return false; 380e380bebeSMel Gorman } 381bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */ 382bb13ffebSMel Gorman 3831f9efdefSVlastimil Babka /* 3848b44d279SVlastimil Babka * Compaction requires the taking of some coarse locks that are potentially 385cb2dcaf0SMel Gorman * very heavily contended. For async compaction, trylock and record if the 386cb2dcaf0SMel Gorman * lock is contended. The lock will still be acquired but compaction will 387cb2dcaf0SMel Gorman * abort when the current block is finished regardless of success rate. 388cb2dcaf0SMel Gorman * Sync compaction acquires the lock. 3898b44d279SVlastimil Babka * 390cb2dcaf0SMel Gorman * Always returns true which makes it easier to track lock state in callers. 
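 *
 * Typical caller pattern (a sketch of the scanners in this file, not a
 * new API):
 *
 *	if (!locked)
 *		locked = compact_lock_irqsave(&cc->zone->lock, &flags, cc);
 *
 * Because the return value is always true, "locked" simply records that the
 * lock is now held; contention is reported separately via cc->contended.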
3911f9efdefSVlastimil Babka */ 392cb2dcaf0SMel Gorman static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags, 3938b44d279SVlastimil Babka struct compact_control *cc) 3948b44d279SVlastimil Babka { 395cb2dcaf0SMel Gorman /* Track if the lock is contended in async mode */ 396cb2dcaf0SMel Gorman if (cc->mode == MIGRATE_ASYNC && !cc->contended) { 397cb2dcaf0SMel Gorman if (spin_trylock_irqsave(lock, *flags)) 398cb2dcaf0SMel Gorman return true; 399cb2dcaf0SMel Gorman 400c3486f53SVlastimil Babka cc->contended = true; 4018b44d279SVlastimil Babka } 4021f9efdefSVlastimil Babka 403cb2dcaf0SMel Gorman spin_lock_irqsave(lock, *flags); 4048b44d279SVlastimil Babka return true; 4052a1402aaSMel Gorman } 4062a1402aaSMel Gorman 40785aa125fSMichal Nazarewicz /* 408c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially 4098b44d279SVlastimil Babka * very heavily contended. The lock should be periodically unlocked to avoid 4108b44d279SVlastimil Babka * having disabled IRQs for a long time, even when there is nobody waiting on 4118b44d279SVlastimil Babka * the lock. It might also be that allowing the IRQs will result in 4128b44d279SVlastimil Babka * need_resched() becoming true. If scheduling is needed, async compaction 4138b44d279SVlastimil Babka * aborts. Sync compaction schedules. 4148b44d279SVlastimil Babka * Either compaction type will also abort if a fatal signal is pending. 4158b44d279SVlastimil Babka * In either case if the lock was locked, it is dropped and not regained. 416c67fe375SMel Gorman * 4178b44d279SVlastimil Babka * Returns true if compaction should abort due to fatal signal pending, or 4188b44d279SVlastimil Babka * async compaction due to need_resched() 4198b44d279SVlastimil Babka * Returns false when compaction can continue (sync compaction might have 4208b44d279SVlastimil Babka * scheduled) 421c67fe375SMel Gorman */ 4228b44d279SVlastimil Babka static bool compact_unlock_should_abort(spinlock_t *lock, 4238b44d279SVlastimil Babka unsigned long flags, bool *locked, struct compact_control *cc) 424c67fe375SMel Gorman { 4258b44d279SVlastimil Babka if (*locked) { 4268b44d279SVlastimil Babka spin_unlock_irqrestore(lock, flags); 4278b44d279SVlastimil Babka *locked = false; 428c67fe375SMel Gorman } 429c67fe375SMel Gorman 4308b44d279SVlastimil Babka if (fatal_signal_pending(current)) { 431c3486f53SVlastimil Babka cc->contended = true; 4328b44d279SVlastimil Babka return true; 4338b44d279SVlastimil Babka } 4348b44d279SVlastimil Babka 435*cf66f070SMel Gorman cond_resched(); 436be976572SVlastimil Babka 437be976572SVlastimil Babka return false; 438be976572SVlastimil Babka } 439be976572SVlastimil Babka 440c67fe375SMel Gorman /* 4419e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 4429e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 4439e4be470SJerome Marchand * (even though it may still end up isolating some pages). 
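 *
 * The two kinds of callers in this file illustrate the @strict flag
 * (sketches of existing calls, not new interfaces):
 *
 *	isolated = isolate_freepages_block(cc, &isolate_start_pfn,
 *					block_end_pfn, &freelist, true);
 *
 * is the CMA-style use where every page in the range must be isolated,
 * while the compaction free scanner passes false and accepts partial
 * success:
 *
 *	isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, false);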
44485aa125fSMichal Nazarewicz */ 445f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 446e14c720eSVlastimil Babka unsigned long *start_pfn, 44785aa125fSMichal Nazarewicz unsigned long end_pfn, 44885aa125fSMichal Nazarewicz struct list_head *freelist, 44985aa125fSMichal Nazarewicz bool strict) 450748446bbSMel Gorman { 451b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 452bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 453b8b2d825SXiubo Li unsigned long flags = 0; 454f40d1e42SMel Gorman bool locked = false; 455e14c720eSVlastimil Babka unsigned long blockpfn = *start_pfn; 45666c64223SJoonsoo Kim unsigned int order; 457748446bbSMel Gorman 458748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 459748446bbSMel Gorman 460f40d1e42SMel Gorman /* Isolate free pages. */ 461748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 46266c64223SJoonsoo Kim int isolated; 463748446bbSMel Gorman struct page *page = cursor; 464748446bbSMel Gorman 4658b44d279SVlastimil Babka /* 4668b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 4678b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort if fatal signal 4688b44d279SVlastimil Babka * pending or async compaction detects need_resched() 4698b44d279SVlastimil Babka */ 4708b44d279SVlastimil Babka if (!(blockpfn % SWAP_CLUSTER_MAX) 4718b44d279SVlastimil Babka && compact_unlock_should_abort(&cc->zone->lock, flags, 4728b44d279SVlastimil Babka &locked, cc)) 4738b44d279SVlastimil Babka break; 4748b44d279SVlastimil Babka 475b7aba698SMel Gorman nr_scanned++; 476f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 4772af120bcSLaura Abbott goto isolate_fail; 4782af120bcSLaura Abbott 479bb13ffebSMel Gorman if (!valid_page) 480bb13ffebSMel Gorman valid_page = page; 4819fcd6d2eSVlastimil Babka 4829fcd6d2eSVlastimil Babka /* 4839fcd6d2eSVlastimil Babka * For compound pages such as THP and hugetlbfs, we can save 4849fcd6d2eSVlastimil Babka * potentially a lot of iterations if we skip them at once. 4859fcd6d2eSVlastimil Babka * The check is racy, but we can consider only valid values 4869fcd6d2eSVlastimil Babka * and the only danger is skipping too much. 4879fcd6d2eSVlastimil Babka */ 4889fcd6d2eSVlastimil Babka if (PageCompound(page)) { 48921dc7e02SDavid Rientjes const unsigned int order = compound_order(page); 4909fcd6d2eSVlastimil Babka 491d3c85badSVlastimil Babka if (likely(order < MAX_ORDER)) { 49221dc7e02SDavid Rientjes blockpfn += (1UL << order) - 1; 49321dc7e02SDavid Rientjes cursor += (1UL << order) - 1; 4949fcd6d2eSVlastimil Babka } 4959fcd6d2eSVlastimil Babka goto isolate_fail; 4969fcd6d2eSVlastimil Babka } 4979fcd6d2eSVlastimil Babka 498f40d1e42SMel Gorman if (!PageBuddy(page)) 4992af120bcSLaura Abbott goto isolate_fail; 500f40d1e42SMel Gorman 501f40d1e42SMel Gorman /* 50269b7189fSVlastimil Babka * If we already hold the lock, we can skip some rechecking. 50369b7189fSVlastimil Babka * Note that if we hold the lock now, checked_pageblock was 50469b7189fSVlastimil Babka * already set in some previous iteration (or strict is true), 50569b7189fSVlastimil Babka * so it is correct to skip the suitable migration target 50669b7189fSVlastimil Babka * recheck as well. 
50769b7189fSVlastimil Babka */ 50869b7189fSVlastimil Babka if (!locked) { 509cb2dcaf0SMel Gorman locked = compact_lock_irqsave(&cc->zone->lock, 5108b44d279SVlastimil Babka &flags, cc); 511f40d1e42SMel Gorman 512f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 513f40d1e42SMel Gorman if (!PageBuddy(page)) 5142af120bcSLaura Abbott goto isolate_fail; 51569b7189fSVlastimil Babka } 516748446bbSMel Gorman 51766c64223SJoonsoo Kim /* Found a free page, will break it into order-0 pages */ 51866c64223SJoonsoo Kim order = page_order(page); 51966c64223SJoonsoo Kim isolated = __isolate_free_page(page, order); 520a4f04f2cSDavid Rientjes if (!isolated) 521a4f04f2cSDavid Rientjes break; 52266c64223SJoonsoo Kim set_page_private(page, order); 523a4f04f2cSDavid Rientjes 524748446bbSMel Gorman total_isolated += isolated; 525a4f04f2cSDavid Rientjes cc->nr_freepages += isolated; 52666c64223SJoonsoo Kim list_add_tail(&page->lru, freelist); 52766c64223SJoonsoo Kim 528a4f04f2cSDavid Rientjes if (!strict && cc->nr_migratepages <= cc->nr_freepages) { 529932ff6bbSJoonsoo Kim blockpfn += isolated; 530932ff6bbSJoonsoo Kim break; 531932ff6bbSJoonsoo Kim } 532a4f04f2cSDavid Rientjes /* Advance to the end of split page */ 533748446bbSMel Gorman blockpfn += isolated - 1; 534748446bbSMel Gorman cursor += isolated - 1; 5352af120bcSLaura Abbott continue; 5362af120bcSLaura Abbott 5372af120bcSLaura Abbott isolate_fail: 5382af120bcSLaura Abbott if (strict) 5392af120bcSLaura Abbott break; 5402af120bcSLaura Abbott else 5412af120bcSLaura Abbott continue; 5422af120bcSLaura Abbott 543748446bbSMel Gorman } 544748446bbSMel Gorman 545a4f04f2cSDavid Rientjes if (locked) 546a4f04f2cSDavid Rientjes spin_unlock_irqrestore(&cc->zone->lock, flags); 547a4f04f2cSDavid Rientjes 5489fcd6d2eSVlastimil Babka /* 5499fcd6d2eSVlastimil Babka * There is a tiny chance that we have read bogus compound_order(), 5509fcd6d2eSVlastimil Babka * so be careful to not go outside of the pageblock. 5519fcd6d2eSVlastimil Babka */ 5529fcd6d2eSVlastimil Babka if (unlikely(blockpfn > end_pfn)) 5539fcd6d2eSVlastimil Babka blockpfn = end_pfn; 5549fcd6d2eSVlastimil Babka 555e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, 556e34d85f0SJoonsoo Kim nr_scanned, total_isolated); 557e34d85f0SJoonsoo Kim 558e14c720eSVlastimil Babka /* Record how far we have got within the block */ 559e14c720eSVlastimil Babka *start_pfn = blockpfn; 560e14c720eSVlastimil Babka 561f40d1e42SMel Gorman /* 562f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 563f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 564f40d1e42SMel Gorman * returned and CMA will fail. 565f40d1e42SMel Gorman */ 5662af120bcSLaura Abbott if (strict && blockpfn < end_pfn) 567f40d1e42SMel Gorman total_isolated = 0; 568f40d1e42SMel Gorman 569bb13ffebSMel Gorman /* Update the pageblock-skip if the whole pageblock was scanned */ 570bb13ffebSMel Gorman if (blockpfn == end_pfn) 571e380bebeSMel Gorman update_pageblock_skip(cc, valid_page, total_isolated); 572bb13ffebSMel Gorman 5737f354a54SDavid Rientjes cc->total_free_scanned += nr_scanned; 574397487dbSMel Gorman if (total_isolated) 575010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, total_isolated); 576748446bbSMel Gorman return total_isolated; 577748446bbSMel Gorman } 578748446bbSMel Gorman 57985aa125fSMichal Nazarewicz /** 58085aa125fSMichal Nazarewicz * isolate_freepages_range() - isolate free pages. 
581e8b098fcSMike Rapoport * @cc: Compaction control structure. 58285aa125fSMichal Nazarewicz * @start_pfn: The first PFN to start isolating. 58385aa125fSMichal Nazarewicz * @end_pfn: The one-past-last PFN. 58485aa125fSMichal Nazarewicz * 58585aa125fSMichal Nazarewicz * Non-free pages, invalid PFNs, or zone boundaries within the 58685aa125fSMichal Nazarewicz * [start_pfn, end_pfn) range are considered errors and cause the function to 58785aa125fSMichal Nazarewicz * undo its actions and return zero. 58885aa125fSMichal Nazarewicz * 58985aa125fSMichal Nazarewicz * Otherwise, the function returns one-past-the-last PFN of isolated page 59085aa125fSMichal Nazarewicz * (which may be greater than end_pfn if end fell in the middle of 59185aa125fSMichal Nazarewicz * a free page). 59285aa125fSMichal Nazarewicz */ 593ff9543fdSMichal Nazarewicz unsigned long 594bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc, 595bb13ffebSMel Gorman unsigned long start_pfn, unsigned long end_pfn) 59685aa125fSMichal Nazarewicz { 597e1409c32SJoonsoo Kim unsigned long isolated, pfn, block_start_pfn, block_end_pfn; 59885aa125fSMichal Nazarewicz LIST_HEAD(freelist); 59985aa125fSMichal Nazarewicz 6007d49d886SVlastimil Babka pfn = start_pfn; 60106b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 602e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 603e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 60406b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 6057d49d886SVlastimil Babka 6067d49d886SVlastimil Babka for (; pfn < end_pfn; pfn += isolated, 607e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 6087d49d886SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 609e14c720eSVlastimil Babka /* Protect pfn from changing by isolate_freepages_block */ 610e14c720eSVlastimil Babka unsigned long isolate_start_pfn = pfn; 6117d49d886SVlastimil Babka 61285aa125fSMichal Nazarewicz block_end_pfn = min(block_end_pfn, end_pfn); 61385aa125fSMichal Nazarewicz 61458420016SJoonsoo Kim /* 61558420016SJoonsoo Kim * pfn could pass the block_end_pfn if isolated freepage 61658420016SJoonsoo Kim * is more than pageblock order. In this case, we adjust 61758420016SJoonsoo Kim * the scanning range to the right one. 61858420016SJoonsoo Kim */ 61958420016SJoonsoo Kim if (pfn >= block_end_pfn) { 62006b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 62106b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 62258420016SJoonsoo Kim block_end_pfn = min(block_end_pfn, end_pfn); 62358420016SJoonsoo Kim } 62458420016SJoonsoo Kim 625e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 626e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 6277d49d886SVlastimil Babka break; 6287d49d886SVlastimil Babka 629e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn, 630e14c720eSVlastimil Babka block_end_pfn, &freelist, true); 63185aa125fSMichal Nazarewicz 63285aa125fSMichal Nazarewicz /* 63385aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if 63485aa125fSMichal Nazarewicz * there are any holes in the block (i.e. invalid PFNs or 63585aa125fSMichal Nazarewicz * non-free pages). 63685aa125fSMichal Nazarewicz */ 63785aa125fSMichal Nazarewicz if (!isolated) 63885aa125fSMichal Nazarewicz break; 63985aa125fSMichal Nazarewicz 64085aa125fSMichal Nazarewicz /* 64185aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) * 64285aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. 
(Max order 64385aa125fSMichal Nazarewicz * page may span two pageblocks). 64485aa125fSMichal Nazarewicz */ 64585aa125fSMichal Nazarewicz } 64685aa125fSMichal Nazarewicz 64766c64223SJoonsoo Kim /* __isolate_free_page() does not map the pages */ 6484469ab98SMel Gorman split_map_pages(&freelist); 64985aa125fSMichal Nazarewicz 65085aa125fSMichal Nazarewicz if (pfn < end_pfn) { 65185aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 65285aa125fSMichal Nazarewicz release_freepages(&freelist); 65385aa125fSMichal Nazarewicz return 0; 65485aa125fSMichal Nazarewicz } 65585aa125fSMichal Nazarewicz 65685aa125fSMichal Nazarewicz /* We don't use freelists for anything. */ 65785aa125fSMichal Nazarewicz return pfn; 65885aa125fSMichal Nazarewicz } 65985aa125fSMichal Nazarewicz 660748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */ 661748446bbSMel Gorman static bool too_many_isolated(struct zone *zone) 662748446bbSMel Gorman { 663bc693045SMinchan Kim unsigned long active, inactive, isolated; 664748446bbSMel Gorman 665599d0c95SMel Gorman inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) + 666599d0c95SMel Gorman node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON); 667599d0c95SMel Gorman active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) + 668599d0c95SMel Gorman node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON); 669599d0c95SMel Gorman isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) + 670599d0c95SMel Gorman node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON); 671748446bbSMel Gorman 672bc693045SMinchan Kim return isolated > (inactive + active) / 2; 673748446bbSMel Gorman } 674748446bbSMel Gorman 6752fe86e00SMichal Nazarewicz /** 676edc2ca61SVlastimil Babka * isolate_migratepages_block() - isolate all migrate-able pages within 677edc2ca61SVlastimil Babka * a single pageblock 6782fe86e00SMichal Nazarewicz * @cc: Compaction control structure. 679edc2ca61SVlastimil Babka * @low_pfn: The first PFN to isolate 680edc2ca61SVlastimil Babka * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock 681edc2ca61SVlastimil Babka * @isolate_mode: Isolation mode to be used. 6822fe86e00SMichal Nazarewicz * 6832fe86e00SMichal Nazarewicz * Isolate all pages that can be migrated from the range specified by 684edc2ca61SVlastimil Babka * [low_pfn, end_pfn). The range is expected to be within same pageblock. 685edc2ca61SVlastimil Babka * Returns zero if there is a fatal signal pending, otherwise PFN of the 686edc2ca61SVlastimil Babka * first page that was not scanned (which may be both less, equal to or more 687edc2ca61SVlastimil Babka * than end_pfn). 6882fe86e00SMichal Nazarewicz * 689edc2ca61SVlastimil Babka * The pages are isolated on cc->migratepages list (not required to be empty), 690edc2ca61SVlastimil Babka * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field 691edc2ca61SVlastimil Babka * is neither read nor updated. 
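 *
 * Sketch of the calling convention (mirrors isolate_migratepages_range()
 * below, not a new interface):
 *
 *	pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 *					ISOLATE_UNEVICTABLE);
 *	if (!pfn)
 *		break;
 *
 * A zero return means isolation had to give up (e.g. a fatal signal is
 * pending); any other value is where the next scan should resume.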
692748446bbSMel Gorman */ 693edc2ca61SVlastimil Babka static unsigned long 694edc2ca61SVlastimil Babka isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 695edc2ca61SVlastimil Babka unsigned long end_pfn, isolate_mode_t isolate_mode) 696748446bbSMel Gorman { 697edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 698b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 699fa9add64SHugh Dickins struct lruvec *lruvec; 700b8b2d825SXiubo Li unsigned long flags = 0; 7012a1402aaSMel Gorman bool locked = false; 702bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL; 703e34d85f0SJoonsoo Kim unsigned long start_pfn = low_pfn; 704fdd048e1SVlastimil Babka bool skip_on_failure = false; 705fdd048e1SVlastimil Babka unsigned long next_skip_pfn = 0; 706e380bebeSMel Gorman bool skip_updated = false; 707748446bbSMel Gorman 708748446bbSMel Gorman /* 709748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 710748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 711748446bbSMel Gorman * delay for some time until fewer pages are isolated 712748446bbSMel Gorman */ 713748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) { 714f9e35b3bSMel Gorman /* async migration should just abort */ 715e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) 7162fe86e00SMichal Nazarewicz return 0; 717f9e35b3bSMel Gorman 718748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 719748446bbSMel Gorman 720748446bbSMel Gorman if (fatal_signal_pending(current)) 7212fe86e00SMichal Nazarewicz return 0; 722748446bbSMel Gorman } 723748446bbSMel Gorman 724*cf66f070SMel Gorman cond_resched(); 725aeef4b83SDavid Rientjes 726fdd048e1SVlastimil Babka if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { 727fdd048e1SVlastimil Babka skip_on_failure = true; 728fdd048e1SVlastimil Babka next_skip_pfn = block_end_pfn(low_pfn, cc->order); 729fdd048e1SVlastimil Babka } 730fdd048e1SVlastimil Babka 731748446bbSMel Gorman /* Time to isolate some pages for migration */ 732748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 73329c0dde8SVlastimil Babka 734fdd048e1SVlastimil Babka if (skip_on_failure && low_pfn >= next_skip_pfn) { 735fdd048e1SVlastimil Babka /* 736fdd048e1SVlastimil Babka * We have isolated all migration candidates in the 737fdd048e1SVlastimil Babka * previous order-aligned block, and did not skip it due 738fdd048e1SVlastimil Babka * to failure. We should migrate the pages now and 739fdd048e1SVlastimil Babka * hopefully succeed compaction. 740fdd048e1SVlastimil Babka */ 741fdd048e1SVlastimil Babka if (nr_isolated) 742fdd048e1SVlastimil Babka break; 743fdd048e1SVlastimil Babka 744fdd048e1SVlastimil Babka /* 745fdd048e1SVlastimil Babka * We failed to isolate in the previous order-aligned 746fdd048e1SVlastimil Babka * block. Set the new boundary to the end of the 747fdd048e1SVlastimil Babka * current block. Note we can't simply increase 748fdd048e1SVlastimil Babka * next_skip_pfn by 1 << order, as low_pfn might have 749fdd048e1SVlastimil Babka * been incremented by a higher number due to skipping 750fdd048e1SVlastimil Babka * a compound or a high-order buddy page in the 751fdd048e1SVlastimil Babka * previous loop iteration. 
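 * As an illustration (hypothetical numbers): with cc->order == 4 an
 * aligned block spans 16 pfns. If the previous iteration stepped over an
 * order-7 buddy page, low_pfn advanced by well over 16 and may already be
 * several blocks past the old next_skip_pfn, which is why the boundary is
 * recomputed from the current low_pfn rather than bumped by 1 << cc->order.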
752fdd048e1SVlastimil Babka */ 753fdd048e1SVlastimil Babka next_skip_pfn = block_end_pfn(low_pfn, cc->order); 754fdd048e1SVlastimil Babka } 755fdd048e1SVlastimil Babka 7568b44d279SVlastimil Babka /* 7578b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 7588b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort async compaction 7598b44d279SVlastimil Babka * if contended. 7608b44d279SVlastimil Babka */ 7618b44d279SVlastimil Babka if (!(low_pfn % SWAP_CLUSTER_MAX) 762a52633d8SMel Gorman && compact_unlock_should_abort(zone_lru_lock(zone), flags, 7638b44d279SVlastimil Babka &locked, cc)) 7648b44d279SVlastimil Babka break; 765b2eef8c0SAndrea Arcangeli 766748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 767fdd048e1SVlastimil Babka goto isolate_fail; 768b7aba698SMel Gorman nr_scanned++; 769748446bbSMel Gorman 770748446bbSMel Gorman page = pfn_to_page(low_pfn); 771dc908600SMel Gorman 772e380bebeSMel Gorman /* 773e380bebeSMel Gorman * Check if the pageblock has already been marked skipped. 774e380bebeSMel Gorman * Only the aligned PFN is checked as the caller isolates 775e380bebeSMel Gorman * COMPACT_CLUSTER_MAX at a time so the second call must 776e380bebeSMel Gorman * not falsely conclude that the block should be skipped. 777e380bebeSMel Gorman */ 778e380bebeSMel Gorman if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) { 779e380bebeSMel Gorman if (!cc->ignore_skip_hint && get_pageblock_skip(page)) { 780e380bebeSMel Gorman low_pfn = end_pfn; 781e380bebeSMel Gorman goto isolate_abort; 782e380bebeSMel Gorman } 783bb13ffebSMel Gorman valid_page = page; 784e380bebeSMel Gorman } 785bb13ffebSMel Gorman 786c122b208SJoonsoo Kim /* 78799c0fd5eSVlastimil Babka * Skip if free. We read page order here without zone lock 78899c0fd5eSVlastimil Babka * which is generally unsafe, but the race window is small and 78999c0fd5eSVlastimil Babka * the worst thing that can happen is that we skip some 79099c0fd5eSVlastimil Babka * potential isolation targets. 7916c14466cSMel Gorman */ 79299c0fd5eSVlastimil Babka if (PageBuddy(page)) { 79399c0fd5eSVlastimil Babka unsigned long freepage_order = page_order_unsafe(page); 79499c0fd5eSVlastimil Babka 79599c0fd5eSVlastimil Babka /* 79699c0fd5eSVlastimil Babka * Without lock, we cannot be sure that what we got is 79799c0fd5eSVlastimil Babka * a valid page order. Consider only values in the 79899c0fd5eSVlastimil Babka * valid order range to prevent low_pfn overflow. 79999c0fd5eSVlastimil Babka */ 80099c0fd5eSVlastimil Babka if (freepage_order > 0 && freepage_order < MAX_ORDER) 80199c0fd5eSVlastimil Babka low_pfn += (1UL << freepage_order) - 1; 802748446bbSMel Gorman continue; 80399c0fd5eSVlastimil Babka } 804748446bbSMel Gorman 8059927af74SMel Gorman /* 80629c0dde8SVlastimil Babka * Regardless of being on LRU, compound pages such as THP and 80729c0dde8SVlastimil Babka * hugetlbfs are not to be compacted. We can potentially save 80829c0dde8SVlastimil Babka * a lot of iterations if we skip them at once. The check is 80929c0dde8SVlastimil Babka * racy, but we can consider only valid values and the only 81029c0dde8SVlastimil Babka * danger is skipping too much. 
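 * For example, on a configuration where a THP is order 9 (512 base pages),
 * a single PageCompound() hit advances low_pfn by 511 pfns in one step
 * instead of failing on each page of the same huge page in turn.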
811bc835011SAndrea Arcangeli */ 81229c0dde8SVlastimil Babka if (PageCompound(page)) { 81321dc7e02SDavid Rientjes const unsigned int order = compound_order(page); 81429c0dde8SVlastimil Babka 815d3c85badSVlastimil Babka if (likely(order < MAX_ORDER)) 81621dc7e02SDavid Rientjes low_pfn += (1UL << order) - 1; 817fdd048e1SVlastimil Babka goto isolate_fail; 8182a1402aaSMel Gorman } 8192a1402aaSMel Gorman 820bda807d4SMinchan Kim /* 821bda807d4SMinchan Kim * Check may be lockless but that's ok as we recheck later. 822bda807d4SMinchan Kim * It's possible to migrate LRU and non-lru movable pages. 823bda807d4SMinchan Kim * Skip any other type of page 824bda807d4SMinchan Kim */ 825bda807d4SMinchan Kim if (!PageLRU(page)) { 826bda807d4SMinchan Kim /* 827bda807d4SMinchan Kim * __PageMovable can return false positive so we need 828bda807d4SMinchan Kim * to verify it under page_lock. 829bda807d4SMinchan Kim */ 830bda807d4SMinchan Kim if (unlikely(__PageMovable(page)) && 831bda807d4SMinchan Kim !PageIsolated(page)) { 832bda807d4SMinchan Kim if (locked) { 833a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(zone), 834bda807d4SMinchan Kim flags); 835bda807d4SMinchan Kim locked = false; 836bda807d4SMinchan Kim } 837bda807d4SMinchan Kim 8389e5bcd61SYisheng Xie if (!isolate_movable_page(page, isolate_mode)) 839bda807d4SMinchan Kim goto isolate_success; 840bda807d4SMinchan Kim } 841bda807d4SMinchan Kim 842fdd048e1SVlastimil Babka goto isolate_fail; 843bda807d4SMinchan Kim } 84429c0dde8SVlastimil Babka 845119d6d59SDavid Rientjes /* 846119d6d59SDavid Rientjes * Migration will fail if an anonymous page is pinned in memory, 847119d6d59SDavid Rientjes * so avoid taking lru_lock and isolating it unnecessarily in an 848119d6d59SDavid Rientjes * admittedly racy check. 849119d6d59SDavid Rientjes */ 850119d6d59SDavid Rientjes if (!page_mapping(page) && 851119d6d59SDavid Rientjes page_count(page) > page_mapcount(page)) 852fdd048e1SVlastimil Babka goto isolate_fail; 853119d6d59SDavid Rientjes 85473e64c51SMichal Hocko /* 85573e64c51SMichal Hocko * Only allow to migrate anonymous pages in GFP_NOFS context 85673e64c51SMichal Hocko * because those do not depend on fs locks. 85773e64c51SMichal Hocko */ 85873e64c51SMichal Hocko if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) 85973e64c51SMichal Hocko goto isolate_fail; 86073e64c51SMichal Hocko 86169b7189fSVlastimil Babka /* If we already hold the lock, we can skip some rechecking */ 86269b7189fSVlastimil Babka if (!locked) { 863cb2dcaf0SMel Gorman locked = compact_lock_irqsave(zone_lru_lock(zone), 8648b44d279SVlastimil Babka &flags, cc); 865e380bebeSMel Gorman 866e380bebeSMel Gorman /* Try get exclusive access under lock */ 867e380bebeSMel Gorman if (!skip_updated) { 868e380bebeSMel Gorman skip_updated = true; 869e380bebeSMel Gorman if (test_and_set_skip(cc, page, low_pfn)) 870e380bebeSMel Gorman goto isolate_abort; 871e380bebeSMel Gorman } 8722a1402aaSMel Gorman 87329c0dde8SVlastimil Babka /* Recheck PageLRU and PageCompound under lock */ 8742a1402aaSMel Gorman if (!PageLRU(page)) 875fdd048e1SVlastimil Babka goto isolate_fail; 87629c0dde8SVlastimil Babka 87729c0dde8SVlastimil Babka /* 87829c0dde8SVlastimil Babka * Page become compound since the non-locked check, 87929c0dde8SVlastimil Babka * and it's on LRU. It can only be a THP so the order 88029c0dde8SVlastimil Babka * is safe to read and it's 0 for tail pages. 
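 * (Illustration: for a tail page compound_order() reads 0, so the
 * adjustment below adds (1UL << 0) - 1 == 0 and the scan simply moves on
 * to the next pfn.)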
88129c0dde8SVlastimil Babka */ 88229c0dde8SVlastimil Babka if (unlikely(PageCompound(page))) { 883d3c85badSVlastimil Babka low_pfn += (1UL << compound_order(page)) - 1; 884fdd048e1SVlastimil Babka goto isolate_fail; 885bc835011SAndrea Arcangeli } 88669b7189fSVlastimil Babka } 887bc835011SAndrea Arcangeli 888599d0c95SMel Gorman lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat); 889fa9add64SHugh Dickins 890748446bbSMel Gorman /* Try isolate the page */ 891edc2ca61SVlastimil Babka if (__isolate_lru_page(page, isolate_mode) != 0) 892fdd048e1SVlastimil Babka goto isolate_fail; 893748446bbSMel Gorman 89429c0dde8SVlastimil Babka VM_BUG_ON_PAGE(PageCompound(page), page); 895bc835011SAndrea Arcangeli 896748446bbSMel Gorman /* Successfully isolated */ 897fa9add64SHugh Dickins del_page_from_lru_list(page, lruvec, page_lru(page)); 8986afcf8efSMing Ling inc_node_page_state(page, 8996afcf8efSMing Ling NR_ISOLATED_ANON + page_is_file_cache(page)); 900b6c75016SJoonsoo Kim 901b6c75016SJoonsoo Kim isolate_success: 902fdd048e1SVlastimil Babka list_add(&page->lru, &cc->migratepages); 903748446bbSMel Gorman cc->nr_migratepages++; 904b7aba698SMel Gorman nr_isolated++; 905748446bbSMel Gorman 906804d3121SMel Gorman /* 907804d3121SMel Gorman * Avoid isolating too much unless this block is being 908cb2dcaf0SMel Gorman * rescanned (e.g. dirty/writeback pages, parallel allocation) 909cb2dcaf0SMel Gorman * or a lock is contended. For contention, isolate quickly to 910cb2dcaf0SMel Gorman * potentially remove one source of contention. 911804d3121SMel Gorman */ 912cb2dcaf0SMel Gorman if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && 913cb2dcaf0SMel Gorman !cc->rescan && !cc->contended) { 91431b8384aSHillf Danton ++low_pfn; 915748446bbSMel Gorman break; 916748446bbSMel Gorman } 917fdd048e1SVlastimil Babka 918fdd048e1SVlastimil Babka continue; 919fdd048e1SVlastimil Babka isolate_fail: 920fdd048e1SVlastimil Babka if (!skip_on_failure) 921fdd048e1SVlastimil Babka continue; 922fdd048e1SVlastimil Babka 923fdd048e1SVlastimil Babka /* 924fdd048e1SVlastimil Babka * We have isolated some pages, but then failed. Release them 925fdd048e1SVlastimil Babka * instead of migrating, as we cannot form the cc->order buddy 926fdd048e1SVlastimil Babka * page anyway. 927fdd048e1SVlastimil Babka */ 928fdd048e1SVlastimil Babka if (nr_isolated) { 929fdd048e1SVlastimil Babka if (locked) { 930a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(zone), flags); 931fdd048e1SVlastimil Babka locked = false; 932fdd048e1SVlastimil Babka } 933fdd048e1SVlastimil Babka putback_movable_pages(&cc->migratepages); 934fdd048e1SVlastimil Babka cc->nr_migratepages = 0; 935fdd048e1SVlastimil Babka nr_isolated = 0; 936fdd048e1SVlastimil Babka } 937fdd048e1SVlastimil Babka 938fdd048e1SVlastimil Babka if (low_pfn < next_skip_pfn) { 939fdd048e1SVlastimil Babka low_pfn = next_skip_pfn - 1; 940fdd048e1SVlastimil Babka /* 941fdd048e1SVlastimil Babka * The check near the loop beginning would have updated 942fdd048e1SVlastimil Babka * next_skip_pfn too, but this is a bit simpler. 943fdd048e1SVlastimil Babka */ 944fdd048e1SVlastimil Babka next_skip_pfn += 1UL << cc->order; 945fdd048e1SVlastimil Babka } 94631b8384aSHillf Danton } 947748446bbSMel Gorman 94899c0fd5eSVlastimil Babka /* 94999c0fd5eSVlastimil Babka * The PageBuddy() check could have potentially brought us outside 95099c0fd5eSVlastimil Babka * the range to be scanned. 
95199c0fd5eSVlastimil Babka */ 95299c0fd5eSVlastimil Babka if (unlikely(low_pfn > end_pfn)) 95399c0fd5eSVlastimil Babka low_pfn = end_pfn; 95499c0fd5eSVlastimil Babka 955e380bebeSMel Gorman isolate_abort: 956c67fe375SMel Gorman if (locked) 957a52633d8SMel Gorman spin_unlock_irqrestore(zone_lru_lock(zone), flags); 958748446bbSMel Gorman 95950b5b094SVlastimil Babka /* 960804d3121SMel Gorman * Updated the cached scanner pfn once the pageblock has been scanned 961804d3121SMel Gorman * Pages will either be migrated in which case there is no point 962804d3121SMel Gorman * scanning in the near future or migration failed in which case the 963804d3121SMel Gorman * failure reason may persist. The block is marked for skipping if 964804d3121SMel Gorman * there were no pages isolated in the block or if the block is 965804d3121SMel Gorman * rescanned twice in a row. 96650b5b094SVlastimil Babka */ 967804d3121SMel Gorman if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) { 968e380bebeSMel Gorman if (valid_page && !skip_updated) 969e380bebeSMel Gorman set_pageblock_skip(valid_page); 970e380bebeSMel Gorman update_cached_migrate(cc, low_pfn); 971e380bebeSMel Gorman } 972bb13ffebSMel Gorman 973e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn, 974e34d85f0SJoonsoo Kim nr_scanned, nr_isolated); 975b7aba698SMel Gorman 9767f354a54SDavid Rientjes cc->total_migrate_scanned += nr_scanned; 977397487dbSMel Gorman if (nr_isolated) 978010fc29aSMinchan Kim count_compact_events(COMPACTISOLATED, nr_isolated); 979397487dbSMel Gorman 9802fe86e00SMichal Nazarewicz return low_pfn; 9812fe86e00SMichal Nazarewicz } 9822fe86e00SMichal Nazarewicz 983edc2ca61SVlastimil Babka /** 984edc2ca61SVlastimil Babka * isolate_migratepages_range() - isolate migrate-able pages in a PFN range 985edc2ca61SVlastimil Babka * @cc: Compaction control structure. 986edc2ca61SVlastimil Babka * @start_pfn: The first PFN to start isolating. 987edc2ca61SVlastimil Babka * @end_pfn: The one-past-last PFN. 988edc2ca61SVlastimil Babka * 989edc2ca61SVlastimil Babka * Returns zero if isolation fails fatally due to e.g. pending signal. 990edc2ca61SVlastimil Babka * Otherwise, function returns one-past-the-last PFN of isolated page 991edc2ca61SVlastimil Babka * (which may be greater than end_pfn if end fell in a middle of a THP page). 992edc2ca61SVlastimil Babka */ 993edc2ca61SVlastimil Babka unsigned long 994edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, 995edc2ca61SVlastimil Babka unsigned long end_pfn) 996edc2ca61SVlastimil Babka { 997e1409c32SJoonsoo Kim unsigned long pfn, block_start_pfn, block_end_pfn; 998edc2ca61SVlastimil Babka 999edc2ca61SVlastimil Babka /* Scan block by block. 
First and last block may be incomplete */ 1000edc2ca61SVlastimil Babka pfn = start_pfn; 100106b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(pfn); 1002e1409c32SJoonsoo Kim if (block_start_pfn < cc->zone->zone_start_pfn) 1003e1409c32SJoonsoo Kim block_start_pfn = cc->zone->zone_start_pfn; 100406b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(pfn); 1005edc2ca61SVlastimil Babka 1006edc2ca61SVlastimil Babka for (; pfn < end_pfn; pfn = block_end_pfn, 1007e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1008edc2ca61SVlastimil Babka block_end_pfn += pageblock_nr_pages) { 1009edc2ca61SVlastimil Babka 1010edc2ca61SVlastimil Babka block_end_pfn = min(block_end_pfn, end_pfn); 1011edc2ca61SVlastimil Babka 1012e1409c32SJoonsoo Kim if (!pageblock_pfn_to_page(block_start_pfn, 1013e1409c32SJoonsoo Kim block_end_pfn, cc->zone)) 1014edc2ca61SVlastimil Babka continue; 1015edc2ca61SVlastimil Babka 1016edc2ca61SVlastimil Babka pfn = isolate_migratepages_block(cc, pfn, block_end_pfn, 1017edc2ca61SVlastimil Babka ISOLATE_UNEVICTABLE); 1018edc2ca61SVlastimil Babka 101914af4a5eSHugh Dickins if (!pfn) 1020edc2ca61SVlastimil Babka break; 10216ea41c0cSJoonsoo Kim 10226ea41c0cSJoonsoo Kim if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) 10236ea41c0cSJoonsoo Kim break; 1024edc2ca61SVlastimil Babka } 1025edc2ca61SVlastimil Babka 1026edc2ca61SVlastimil Babka return pfn; 1027edc2ca61SVlastimil Babka } 1028edc2ca61SVlastimil Babka 1029ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */ 1030ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION 1031018e9a49SAndrew Morton 1032b682debdSVlastimil Babka static bool suitable_migration_source(struct compact_control *cc, 1033b682debdSVlastimil Babka struct page *page) 1034b682debdSVlastimil Babka { 1035282722b0SVlastimil Babka int block_mt; 1036282722b0SVlastimil Babka 10379bebefd5SMel Gorman if (pageblock_skip_persistent(page)) 10389bebefd5SMel Gorman return false; 10399bebefd5SMel Gorman 1040282722b0SVlastimil Babka if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) 1041b682debdSVlastimil Babka return true; 1042b682debdSVlastimil Babka 1043282722b0SVlastimil Babka block_mt = get_pageblock_migratetype(page); 1044282722b0SVlastimil Babka 1045282722b0SVlastimil Babka if (cc->migratetype == MIGRATE_MOVABLE) 1046282722b0SVlastimil Babka return is_migrate_movable(block_mt); 1047282722b0SVlastimil Babka else 1048282722b0SVlastimil Babka return block_mt == cc->migratetype; 1049b682debdSVlastimil Babka } 1050b682debdSVlastimil Babka 1051018e9a49SAndrew Morton /* Returns true if the page is within a block suitable for migration to */ 10529f7e3387SVlastimil Babka static bool suitable_migration_target(struct compact_control *cc, 10539f7e3387SVlastimil Babka struct page *page) 1054018e9a49SAndrew Morton { 1055018e9a49SAndrew Morton /* If the page is a large free page, then disallow migration */ 1056018e9a49SAndrew Morton if (PageBuddy(page)) { 1057018e9a49SAndrew Morton /* 1058018e9a49SAndrew Morton * We are checking page_order without zone->lock taken. But 1059018e9a49SAndrew Morton * the only small danger is that we skip a potentially suitable 1060018e9a49SAndrew Morton * pageblock, so it's not worth to check order for valid range. 
1061018e9a49SAndrew Morton */ 1062018e9a49SAndrew Morton if (page_order_unsafe(page) >= pageblock_order) 1063018e9a49SAndrew Morton return false; 1064018e9a49SAndrew Morton } 1065018e9a49SAndrew Morton 10661ef36db2SYisheng Xie if (cc->ignore_block_suitable) 10671ef36db2SYisheng Xie return true; 10681ef36db2SYisheng Xie 1069018e9a49SAndrew Morton /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 1070b682debdSVlastimil Babka if (is_migrate_movable(get_pageblock_migratetype(page))) 1071018e9a49SAndrew Morton return true; 1072018e9a49SAndrew Morton 1073018e9a49SAndrew Morton /* Otherwise skip the block */ 1074018e9a49SAndrew Morton return false; 1075018e9a49SAndrew Morton } 1076018e9a49SAndrew Morton 107770b44595SMel Gorman static inline unsigned int 107870b44595SMel Gorman freelist_scan_limit(struct compact_control *cc) 107970b44595SMel Gorman { 108070b44595SMel Gorman return (COMPACT_CLUSTER_MAX >> cc->fast_search_fail) + 1; 108170b44595SMel Gorman } 108270b44595SMel Gorman 1083ff9543fdSMichal Nazarewicz /* 1084f2849aa0SVlastimil Babka * Test whether the free scanner has reached the same or lower pageblock than 1085f2849aa0SVlastimil Babka * the migration scanner, and compaction should thus terminate. 1086f2849aa0SVlastimil Babka */ 1087f2849aa0SVlastimil Babka static inline bool compact_scanners_met(struct compact_control *cc) 1088f2849aa0SVlastimil Babka { 1089f2849aa0SVlastimil Babka return (cc->free_pfn >> pageblock_order) 1090f2849aa0SVlastimil Babka <= (cc->migrate_pfn >> pageblock_order); 1091f2849aa0SVlastimil Babka } 1092f2849aa0SVlastimil Babka 10935a811889SMel Gorman /* 10945a811889SMel Gorman * Used when scanning for a suitable migration target which scans freelists 10955a811889SMel Gorman * in reverse. Reorders the list such that the unscanned pages are scanned 10965a811889SMel Gorman * first on the next iteration of the free scanner. 10975a811889SMel Gorman */ 10985a811889SMel Gorman static void 10995a811889SMel Gorman move_freelist_head(struct list_head *freelist, struct page *freepage) 11005a811889SMel Gorman { 11015a811889SMel Gorman LIST_HEAD(sublist); 11025a811889SMel Gorman 11035a811889SMel Gorman if (!list_is_last(freelist, &freepage->lru)) { 11045a811889SMel Gorman list_cut_before(&sublist, freelist, &freepage->lru); 11055a811889SMel Gorman if (!list_empty(&sublist)) 11065a811889SMel Gorman list_splice_tail(&sublist, freelist); 11075a811889SMel Gorman } 11085a811889SMel Gorman } 11095a811889SMel Gorman 11105a811889SMel Gorman /* 11115a811889SMel Gorman * Similar to move_freelist_head except used by the migration scanner 11125a811889SMel Gorman * when scanning forward. It's possible for these list operations to 11135a811889SMel Gorman * move against each other if they search the free list exactly in 11145a811889SMel Gorman * lockstep. 
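 *
 * Illustrative effect (entries are hypothetical): if the freelist is
 * A-B-C-D-E and the forward scan stops at C, the already-visited head
 * A-B-C is rotated to the tail, leaving D-E-A-B-C, so the next search
 * starts with the entries that were not inspected this time.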
11155a811889SMel Gorman */ 111670b44595SMel Gorman static void 111770b44595SMel Gorman move_freelist_tail(struct list_head *freelist, struct page *freepage) 111870b44595SMel Gorman { 111970b44595SMel Gorman LIST_HEAD(sublist); 112070b44595SMel Gorman 112170b44595SMel Gorman if (!list_is_first(freelist, &freepage->lru)) { 112270b44595SMel Gorman list_cut_position(&sublist, freelist, &freepage->lru); 112370b44595SMel Gorman if (!list_empty(&sublist)) 112470b44595SMel Gorman list_splice_tail(&sublist, freelist); 112570b44595SMel Gorman } 112670b44595SMel Gorman } 112770b44595SMel Gorman 11285a811889SMel Gorman static void 11295a811889SMel Gorman fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated) 11305a811889SMel Gorman { 11315a811889SMel Gorman unsigned long start_pfn, end_pfn; 11325a811889SMel Gorman struct page *page = pfn_to_page(pfn); 11335a811889SMel Gorman 11345a811889SMel Gorman /* Do not search around if there are enough pages already */ 11355a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 11365a811889SMel Gorman return; 11375a811889SMel Gorman 11385a811889SMel Gorman /* Minimise scanning during async compaction */ 11395a811889SMel Gorman if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) 11405a811889SMel Gorman return; 11415a811889SMel Gorman 11425a811889SMel Gorman /* Pageblock boundaries */ 11435a811889SMel Gorman start_pfn = pageblock_start_pfn(pfn); 11445a811889SMel Gorman end_pfn = min(start_pfn + pageblock_nr_pages, zone_end_pfn(cc->zone)); 11455a811889SMel Gorman 11465a811889SMel Gorman /* Scan before */ 11475a811889SMel Gorman if (start_pfn != pfn) { 11485a811889SMel Gorman isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, false); 11495a811889SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) 11505a811889SMel Gorman return; 11515a811889SMel Gorman } 11525a811889SMel Gorman 11535a811889SMel Gorman /* Scan after */ 11545a811889SMel Gorman start_pfn = pfn + nr_isolated; 11555a811889SMel Gorman if (start_pfn != end_pfn) 11565a811889SMel Gorman isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, false); 11575a811889SMel Gorman 11585a811889SMel Gorman /* Skip this pageblock in the future as it's full or nearly full */ 11595a811889SMel Gorman if (cc->nr_freepages < cc->nr_migratepages) 11605a811889SMel Gorman set_pageblock_skip(page); 11615a811889SMel Gorman } 11625a811889SMel Gorman 11635a811889SMel Gorman static unsigned long 11645a811889SMel Gorman fast_isolate_freepages(struct compact_control *cc) 11655a811889SMel Gorman { 11665a811889SMel Gorman unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1); 11675a811889SMel Gorman unsigned int nr_scanned = 0; 11685a811889SMel Gorman unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0; 11695a811889SMel Gorman unsigned long nr_isolated = 0; 11705a811889SMel Gorman unsigned long distance; 11715a811889SMel Gorman struct page *page = NULL; 11725a811889SMel Gorman bool scan_start = false; 11735a811889SMel Gorman int order; 11745a811889SMel Gorman 11755a811889SMel Gorman /* Full compaction passes in a negative order */ 11765a811889SMel Gorman if (cc->order <= 0) 11775a811889SMel Gorman return cc->free_pfn; 11785a811889SMel Gorman 11795a811889SMel Gorman /* 11805a811889SMel Gorman * If starting the scan, use a deeper search and use the highest 11815a811889SMel Gorman * PFN found if a suitable one is not found. 
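 *
 * For scale, a sketch with illustrative, pageblock-aligned numbers: if
 * cc->migrate_pfn is 0x10000 and cc->free_pfn is 0x20000, the code below
 * computes distance = 0x10000 and prefers free pages with pfn >= low_pfn
 * (0x1c000, the top quarter of the remaining space), falling back to the
 * best candidate at or above min_pfn (0x18000, the top half) if the
 * preferred window yields nothing within the scan limit.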
11825a811889SMel Gorman */ 11835a811889SMel Gorman if (cc->free_pfn == pageblock_start_pfn(zone_end_pfn(cc->zone) - 1)) { 11845a811889SMel Gorman limit = pageblock_nr_pages >> 1; 11855a811889SMel Gorman scan_start = true; 11865a811889SMel Gorman } 11875a811889SMel Gorman 11885a811889SMel Gorman /* 11895a811889SMel Gorman * Preferred point is in the top quarter of the scan space but take 11905a811889SMel Gorman * a pfn from the top half if the search is problematic. 11915a811889SMel Gorman */ 11925a811889SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn); 11935a811889SMel Gorman low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); 11945a811889SMel Gorman min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); 11955a811889SMel Gorman 11965a811889SMel Gorman if (WARN_ON_ONCE(min_pfn > low_pfn)) 11975a811889SMel Gorman low_pfn = min_pfn; 11985a811889SMel Gorman 11995a811889SMel Gorman for (order = cc->order - 1; 12005a811889SMel Gorman order >= 0 && !page; 12015a811889SMel Gorman order--) { 12025a811889SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 12035a811889SMel Gorman struct list_head *freelist; 12045a811889SMel Gorman struct page *freepage; 12055a811889SMel Gorman unsigned long flags; 12065a811889SMel Gorman unsigned int order_scanned = 0; 12075a811889SMel Gorman 12085a811889SMel Gorman if (!area->nr_free) 12095a811889SMel Gorman continue; 12105a811889SMel Gorman 12115a811889SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 12125a811889SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 12135a811889SMel Gorman list_for_each_entry_reverse(freepage, freelist, lru) { 12145a811889SMel Gorman unsigned long pfn; 12155a811889SMel Gorman 12165a811889SMel Gorman order_scanned++; 12175a811889SMel Gorman nr_scanned++; 12185a811889SMel Gorman pfn = page_to_pfn(freepage); 12195a811889SMel Gorman 12205a811889SMel Gorman if (pfn >= highest) 12215a811889SMel Gorman highest = pageblock_start_pfn(pfn); 12225a811889SMel Gorman 12235a811889SMel Gorman if (pfn >= low_pfn) { 12245a811889SMel Gorman cc->fast_search_fail = 0; 12255a811889SMel Gorman page = freepage; 12265a811889SMel Gorman break; 12275a811889SMel Gorman } 12285a811889SMel Gorman 12295a811889SMel Gorman if (pfn >= min_pfn && pfn > high_pfn) { 12305a811889SMel Gorman high_pfn = pfn; 12315a811889SMel Gorman 12325a811889SMel Gorman /* Shorten the scan if a candidate is found */ 12335a811889SMel Gorman limit >>= 1; 12345a811889SMel Gorman } 12355a811889SMel Gorman 12365a811889SMel Gorman if (order_scanned >= limit) 12375a811889SMel Gorman break; 12385a811889SMel Gorman } 12395a811889SMel Gorman 12405a811889SMel Gorman /* Use a minimum pfn if a preferred one was not found */ 12415a811889SMel Gorman if (!page && high_pfn) { 12425a811889SMel Gorman page = pfn_to_page(high_pfn); 12435a811889SMel Gorman 12445a811889SMel Gorman /* Update freepage for the list reorder below */ 12455a811889SMel Gorman freepage = page; 12465a811889SMel Gorman } 12475a811889SMel Gorman 12485a811889SMel Gorman /* Reorder to so a future search skips recent pages */ 12495a811889SMel Gorman move_freelist_head(freelist, freepage); 12505a811889SMel Gorman 12515a811889SMel Gorman /* Isolate the page if available */ 12525a811889SMel Gorman if (page) { 12535a811889SMel Gorman if (__isolate_free_page(page, order)) { 12545a811889SMel Gorman set_page_private(page, order); 12555a811889SMel Gorman nr_isolated = 1 << order; 12565a811889SMel Gorman cc->nr_freepages += nr_isolated; 12575a811889SMel Gorman list_add_tail(&page->lru, 
&cc->freepages); 12585a811889SMel Gorman count_compact_events(COMPACTISOLATED, nr_isolated); 12595a811889SMel Gorman } else { 12605a811889SMel Gorman /* If isolation fails, abort the search */ 12615a811889SMel Gorman order = -1; 12625a811889SMel Gorman page = NULL; 12635a811889SMel Gorman } 12645a811889SMel Gorman } 12655a811889SMel Gorman 12665a811889SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 12675a811889SMel Gorman 12685a811889SMel Gorman /* 12695a811889SMel Gorman * Smaller scan on next order so the total scan ig related 12705a811889SMel Gorman * to freelist_scan_limit. 12715a811889SMel Gorman */ 12725a811889SMel Gorman if (order_scanned >= limit) 12735a811889SMel Gorman limit = min(1U, limit >> 1); 12745a811889SMel Gorman } 12755a811889SMel Gorman 12765a811889SMel Gorman if (!page) { 12775a811889SMel Gorman cc->fast_search_fail++; 12785a811889SMel Gorman if (scan_start) { 12795a811889SMel Gorman /* 12805a811889SMel Gorman * Use the highest PFN found above min. If one was 12815a811889SMel Gorman * not found, be pessemistic for direct compaction 12825a811889SMel Gorman * and use the min mark. 12835a811889SMel Gorman */ 12845a811889SMel Gorman if (highest) { 12855a811889SMel Gorman page = pfn_to_page(highest); 12865a811889SMel Gorman cc->free_pfn = highest; 12875a811889SMel Gorman } else { 12885a811889SMel Gorman if (cc->direct_compaction) { 12895a811889SMel Gorman page = pfn_to_page(min_pfn); 12905a811889SMel Gorman cc->free_pfn = min_pfn; 12915a811889SMel Gorman } 12925a811889SMel Gorman } 12935a811889SMel Gorman } 12945a811889SMel Gorman } 12955a811889SMel Gorman 12965a811889SMel Gorman if (highest && highest > cc->zone->compact_cached_free_pfn) 12975a811889SMel Gorman cc->zone->compact_cached_free_pfn = highest; 12985a811889SMel Gorman 12995a811889SMel Gorman cc->total_free_scanned += nr_scanned; 13005a811889SMel Gorman if (!page) 13015a811889SMel Gorman return cc->free_pfn; 13025a811889SMel Gorman 13035a811889SMel Gorman low_pfn = page_to_pfn(page); 13045a811889SMel Gorman fast_isolate_around(cc, low_pfn, nr_isolated); 13055a811889SMel Gorman return low_pfn; 13065a811889SMel Gorman } 13075a811889SMel Gorman 1308f2849aa0SVlastimil Babka /* 1309ff9543fdSMichal Nazarewicz * Based on information in the current compact_control, find blocks 1310ff9543fdSMichal Nazarewicz * suitable for isolating free pages from and then isolate them. 1311ff9543fdSMichal Nazarewicz */ 1312edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc) 1313ff9543fdSMichal Nazarewicz { 1314edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 1315ff9543fdSMichal Nazarewicz struct page *page; 1316c96b9e50SVlastimil Babka unsigned long block_start_pfn; /* start of current pageblock */ 1317e14c720eSVlastimil Babka unsigned long isolate_start_pfn; /* exact pfn we start at */ 1318c96b9e50SVlastimil Babka unsigned long block_end_pfn; /* end of current pageblock */ 1319c96b9e50SVlastimil Babka unsigned long low_pfn; /* lowest pfn scanner is able to scan */ 1320ff9543fdSMichal Nazarewicz struct list_head *freelist = &cc->freepages; 13212fe86e00SMichal Nazarewicz 13225a811889SMel Gorman /* Try a small search of the free lists for a candidate */ 13235a811889SMel Gorman isolate_start_pfn = fast_isolate_freepages(cc); 13245a811889SMel Gorman if (cc->nr_freepages) 13255a811889SMel Gorman goto splitmap; 13265a811889SMel Gorman 1327ff9543fdSMichal Nazarewicz /* 1328ff9543fdSMichal Nazarewicz * Initialise the free scanner. 
The starting point is where we last 132949e068f0SVlastimil Babka * successfully isolated from, zone-cached value, or the end of the 1330e14c720eSVlastimil Babka * zone when isolating for the first time. For looping we also need 1331e14c720eSVlastimil Babka * this pfn aligned down to the pageblock boundary, because we do 1332c96b9e50SVlastimil Babka * block_start_pfn -= pageblock_nr_pages in the for loop. 1333c96b9e50SVlastimil Babka * For ending point, take care when isolating in last pageblock of a 1334c96b9e50SVlastimil Babka * a zone which ends in the middle of a pageblock. 133549e068f0SVlastimil Babka * The low boundary is the end of the pageblock the migration scanner 133649e068f0SVlastimil Babka * is using. 1337ff9543fdSMichal Nazarewicz */ 1338e14c720eSVlastimil Babka isolate_start_pfn = cc->free_pfn; 13395a811889SMel Gorman block_start_pfn = pageblock_start_pfn(isolate_start_pfn); 1340c96b9e50SVlastimil Babka block_end_pfn = min(block_start_pfn + pageblock_nr_pages, 1341c96b9e50SVlastimil Babka zone_end_pfn(zone)); 134206b6640aSVlastimil Babka low_pfn = pageblock_end_pfn(cc->migrate_pfn); 13432fe86e00SMichal Nazarewicz 1344ff9543fdSMichal Nazarewicz /* 1345ff9543fdSMichal Nazarewicz * Isolate free pages until enough are available to migrate the 1346ff9543fdSMichal Nazarewicz * pages on cc->migratepages. We stop searching if the migrate 1347ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 1348ff9543fdSMichal Nazarewicz */ 1349f5f61a32SVlastimil Babka for (; block_start_pfn >= low_pfn; 1350c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 1351e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 1352e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 1353f6ea3adbSDavid Rientjes /* 1354f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 1355cb810ad2SMel Gorman * suitable migration targets, so periodically check resched. 1356f6ea3adbSDavid Rientjes */ 1357cb810ad2SMel Gorman if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1358*cf66f070SMel Gorman cond_resched(); 1359f6ea3adbSDavid Rientjes 13607d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 13617d49d886SVlastimil Babka zone); 13627d49d886SVlastimil Babka if (!page) 1363ff9543fdSMichal Nazarewicz continue; 1364ff9543fdSMichal Nazarewicz 1365ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 13669f7e3387SVlastimil Babka if (!suitable_migration_target(cc, page)) 1367ff9543fdSMichal Nazarewicz continue; 136868e3e926SLinus Torvalds 1369bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 1370bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 1371bb13ffebSMel Gorman continue; 1372bb13ffebSMel Gorman 1373e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 1374a46cbf3bSDavid Rientjes isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, 1375a46cbf3bSDavid Rientjes freelist, false); 1376ff9543fdSMichal Nazarewicz 1377cb2dcaf0SMel Gorman /* Are enough freepages isolated? */ 1378cb2dcaf0SMel Gorman if (cc->nr_freepages >= cc->nr_migratepages) { 1379a46cbf3bSDavid Rientjes if (isolate_start_pfn >= block_end_pfn) { 1380a46cbf3bSDavid Rientjes /* 1381a46cbf3bSDavid Rientjes * Restart at previous pageblock if more 1382a46cbf3bSDavid Rientjes * freepages can be isolated next time. 
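 * Illustrative numbers (assuming 512-page pageblocks): if the
 * block starting at pfn 262656 was fully scanned,
 * isolate_start_pfn is rewound to 262144 so the next call
 * resumes one pageblock lower instead of rescanning the block
 * that was just emptied.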
1383a46cbf3bSDavid Rientjes */ 1384f5f61a32SVlastimil Babka isolate_start_pfn = 1385e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 1386a46cbf3bSDavid Rientjes } 1387be976572SVlastimil Babka break; 1388a46cbf3bSDavid Rientjes } else if (isolate_start_pfn < block_end_pfn) { 1389f5f61a32SVlastimil Babka /* 1390a46cbf3bSDavid Rientjes * If isolation failed early, do not continue 1391a46cbf3bSDavid Rientjes * needlessly. 1392f5f61a32SVlastimil Babka */ 1393a46cbf3bSDavid Rientjes break; 1394f5f61a32SVlastimil Babka } 1395c89511abSMel Gorman } 1396ff9543fdSMichal Nazarewicz 13977ed695e0SVlastimil Babka /* 1398f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. Either we 1399f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1400f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1401f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 14027ed695e0SVlastimil Babka */ 1403f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 14045a811889SMel Gorman 14055a811889SMel Gorman splitmap: 14065a811889SMel Gorman /* __isolate_free_page() does not map the pages */ 14075a811889SMel Gorman split_map_pages(freelist); 1408748446bbSMel Gorman } 1409748446bbSMel Gorman 1410748446bbSMel Gorman /* 1411748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1412748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1413748446bbSMel Gorman */ 1414748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1415666feb21SMichal Hocko unsigned long data) 1416748446bbSMel Gorman { 1417748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1418748446bbSMel Gorman struct page *freepage; 1419748446bbSMel Gorman 1420748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1421edc2ca61SVlastimil Babka isolate_freepages(cc); 1422748446bbSMel Gorman 1423748446bbSMel Gorman if (list_empty(&cc->freepages)) 1424748446bbSMel Gorman return NULL; 1425748446bbSMel Gorman } 1426748446bbSMel Gorman 1427748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1428748446bbSMel Gorman list_del(&freepage->lru); 1429748446bbSMel Gorman cc->nr_freepages--; 1430748446bbSMel Gorman 1431748446bbSMel Gorman return freepage; 1432748446bbSMel Gorman } 1433748446bbSMel Gorman 1434748446bbSMel Gorman /* 1435d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1436d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1437d53aea3dSDavid Rientjes * special handling needed for NUMA. 
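 *
 * A deliberately simplified sketch of how the two callbacks pair up inside
 * the migration core (the real loop in mm/migrate.c also handles refcounts,
 * locking and error codes; migrate_one() below is a hypothetical stand-in
 * for that work):
 *
 *	list_for_each_entry_safe(page, next, &cc->migratepages, lru) {
 *		newpage = compaction_alloc(page, (unsigned long)cc);
 *		if (!newpage)
 *			break;			// free scanner found nothing
 *		if (migrate_one(page, newpage))
 *			compaction_free(newpage, (unsigned long)cc);
 *	}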
1438d53aea3dSDavid Rientjes */ 1439d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1440d53aea3dSDavid Rientjes { 1441d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1442d53aea3dSDavid Rientjes 1443d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1444d53aea3dSDavid Rientjes cc->nr_freepages++; 1445d53aea3dSDavid Rientjes } 1446d53aea3dSDavid Rientjes 1447ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1448ff9543fdSMichal Nazarewicz typedef enum { 1449ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1450ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1451ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1452ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1453ff9543fdSMichal Nazarewicz 1454ff9543fdSMichal Nazarewicz /* 14555bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 14565bbe3547SEric B Munson * compactable pages. 14575bbe3547SEric B Munson */ 14585bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 14595bbe3547SEric B Munson 146070b44595SMel Gorman static inline void 146170b44595SMel Gorman update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) 146270b44595SMel Gorman { 146370b44595SMel Gorman if (cc->fast_start_pfn == ULONG_MAX) 146470b44595SMel Gorman return; 146570b44595SMel Gorman 146670b44595SMel Gorman if (!cc->fast_start_pfn) 146770b44595SMel Gorman cc->fast_start_pfn = pfn; 146870b44595SMel Gorman 146970b44595SMel Gorman cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); 147070b44595SMel Gorman } 147170b44595SMel Gorman 147270b44595SMel Gorman static inline unsigned long 147370b44595SMel Gorman reinit_migrate_pfn(struct compact_control *cc) 147470b44595SMel Gorman { 147570b44595SMel Gorman if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) 147670b44595SMel Gorman return cc->migrate_pfn; 147770b44595SMel Gorman 147870b44595SMel Gorman cc->migrate_pfn = cc->fast_start_pfn; 147970b44595SMel Gorman cc->fast_start_pfn = ULONG_MAX; 148070b44595SMel Gorman 148170b44595SMel Gorman return cc->migrate_pfn; 148270b44595SMel Gorman } 148370b44595SMel Gorman 148470b44595SMel Gorman /* 148570b44595SMel Gorman * Briefly search the free lists for a migration source that already has 148670b44595SMel Gorman * some free pages to reduce the number of pages that need migration 148770b44595SMel Gorman * before a pageblock is free. 148870b44595SMel Gorman */ 148970b44595SMel Gorman static unsigned long fast_find_migrateblock(struct compact_control *cc) 149070b44595SMel Gorman { 149170b44595SMel Gorman unsigned int limit = freelist_scan_limit(cc); 149270b44595SMel Gorman unsigned int nr_scanned = 0; 149370b44595SMel Gorman unsigned long distance; 149470b44595SMel Gorman unsigned long pfn = cc->migrate_pfn; 149570b44595SMel Gorman unsigned long high_pfn; 149670b44595SMel Gorman int order; 149770b44595SMel Gorman 149870b44595SMel Gorman /* Skip hints are relied on to avoid repeats on the fast search */ 149970b44595SMel Gorman if (cc->ignore_skip_hint) 150070b44595SMel Gorman return pfn; 150170b44595SMel Gorman 150270b44595SMel Gorman /* 150370b44595SMel Gorman * If the migrate_pfn is not at the start of a zone or the start 150470b44595SMel Gorman * of a pageblock then assume this is a continuation of a previous 150570b44595SMel Gorman * scan restarted due to COMPACT_CLUSTER_MAX. 
150670b44595SMel Gorman */ 150770b44595SMel Gorman if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) 150870b44595SMel Gorman return pfn; 150970b44595SMel Gorman 151070b44595SMel Gorman /* 151170b44595SMel Gorman * For smaller orders, just linearly scan as the number of pages 151270b44595SMel Gorman * to migrate should be relatively small and does not necessarily 151370b44595SMel Gorman * justify freeing up a large block for a small allocation. 151470b44595SMel Gorman */ 151570b44595SMel Gorman if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) 151670b44595SMel Gorman return pfn; 151770b44595SMel Gorman 151870b44595SMel Gorman /* 151970b44595SMel Gorman * Only allow kcompactd and direct requests for movable pages to 152070b44595SMel Gorman * quickly clear out a MOVABLE pageblock for allocation. This 152170b44595SMel Gorman * reduces the risk that a large movable pageblock is freed for 152270b44595SMel Gorman * an unmovable/reclaimable small allocation. 152370b44595SMel Gorman */ 152470b44595SMel Gorman if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) 152570b44595SMel Gorman return pfn; 152670b44595SMel Gorman 152770b44595SMel Gorman /* 152870b44595SMel Gorman * When starting the migration scanner, pick any pageblock within the 152970b44595SMel Gorman * first half of the search space. Otherwise try and pick a pageblock 153070b44595SMel Gorman * within the first eighth to reduce the chances that a migration 153170b44595SMel Gorman * target later becomes a source. 153270b44595SMel Gorman */ 153370b44595SMel Gorman distance = (cc->free_pfn - cc->migrate_pfn) >> 1; 153470b44595SMel Gorman if (cc->migrate_pfn != cc->zone->zone_start_pfn) 153570b44595SMel Gorman distance >>= 2; 153670b44595SMel Gorman high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); 153770b44595SMel Gorman 153870b44595SMel Gorman for (order = cc->order - 1; 153970b44595SMel Gorman order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit; 154070b44595SMel Gorman order--) { 154170b44595SMel Gorman struct free_area *area = &cc->zone->free_area[order]; 154270b44595SMel Gorman struct list_head *freelist; 154370b44595SMel Gorman unsigned long flags; 154470b44595SMel Gorman struct page *freepage; 154570b44595SMel Gorman 154670b44595SMel Gorman if (!area->nr_free) 154770b44595SMel Gorman continue; 154870b44595SMel Gorman 154970b44595SMel Gorman spin_lock_irqsave(&cc->zone->lock, flags); 155070b44595SMel Gorman freelist = &area->free_list[MIGRATE_MOVABLE]; 155170b44595SMel Gorman list_for_each_entry(freepage, freelist, lru) { 155270b44595SMel Gorman unsigned long free_pfn; 155370b44595SMel Gorman 155470b44595SMel Gorman nr_scanned++; 155570b44595SMel Gorman free_pfn = page_to_pfn(freepage); 155670b44595SMel Gorman if (free_pfn < high_pfn) { 155770b44595SMel Gorman /* 155870b44595SMel Gorman * Avoid if skipped recently. Ideally it would 155970b44595SMel Gorman * move to the tail but even safe iteration of 156070b44595SMel Gorman * the list assumes an entry is deleted, not 156170b44595SMel Gorman * reordered. 
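 *
 * (For scale, illustrative numbers for the free_pfn < high_pfn
 * test above: when the scanner is still at the zone start with
 * cc->migrate_pfn == 0 and cc->free_pfn == 262144, distance is
 * 131072 and high_pfn is 131072, so hints are only taken from
 * the first half of the unscanned space; once the scanner has
 * moved off the zone start the window shrinks to the first
 * eighth.)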
156270b44595SMel Gorman */ 156370b44595SMel Gorman if (get_pageblock_skip(freepage)) { 156470b44595SMel Gorman if (list_is_last(freelist, &freepage->lru)) 156570b44595SMel Gorman break; 156670b44595SMel Gorman 156770b44595SMel Gorman continue; 156870b44595SMel Gorman } 156970b44595SMel Gorman 157070b44595SMel Gorman /* Reorder to so a future search skips recent pages */ 157170b44595SMel Gorman move_freelist_tail(freelist, freepage); 157270b44595SMel Gorman 1573e380bebeSMel Gorman update_fast_start_pfn(cc, free_pfn); 157470b44595SMel Gorman pfn = pageblock_start_pfn(free_pfn); 157570b44595SMel Gorman cc->fast_search_fail = 0; 157670b44595SMel Gorman set_pageblock_skip(freepage); 157770b44595SMel Gorman break; 157870b44595SMel Gorman } 157970b44595SMel Gorman 158070b44595SMel Gorman if (nr_scanned >= limit) { 158170b44595SMel Gorman cc->fast_search_fail++; 158270b44595SMel Gorman move_freelist_tail(freelist, freepage); 158370b44595SMel Gorman break; 158470b44595SMel Gorman } 158570b44595SMel Gorman } 158670b44595SMel Gorman spin_unlock_irqrestore(&cc->zone->lock, flags); 158770b44595SMel Gorman } 158870b44595SMel Gorman 158970b44595SMel Gorman cc->total_migrate_scanned += nr_scanned; 159070b44595SMel Gorman 159170b44595SMel Gorman /* 159270b44595SMel Gorman * If fast scanning failed then use a cached entry for a page block 159370b44595SMel Gorman * that had free pages as the basis for starting a linear scan. 159470b44595SMel Gorman */ 159570b44595SMel Gorman if (pfn == cc->migrate_pfn) 159670b44595SMel Gorman pfn = reinit_migrate_pfn(cc); 159770b44595SMel Gorman 159870b44595SMel Gorman return pfn; 159970b44595SMel Gorman } 160070b44595SMel Gorman 16015bbe3547SEric B Munson /* 1602edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1603edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1604edc2ca61SVlastimil Babka * compact_control. 1605ff9543fdSMichal Nazarewicz */ 1606ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 1607ff9543fdSMichal Nazarewicz struct compact_control *cc) 1608ff9543fdSMichal Nazarewicz { 1609e1409c32SJoonsoo Kim unsigned long block_start_pfn; 1610e1409c32SJoonsoo Kim unsigned long block_end_pfn; 1611e1409c32SJoonsoo Kim unsigned long low_pfn; 1612edc2ca61SVlastimil Babka struct page *page; 1613edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 16145bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 16151d2047feSHugh Dickins (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); 161670b44595SMel Gorman bool fast_find_block; 1617ff9543fdSMichal Nazarewicz 1618edc2ca61SVlastimil Babka /* 1619edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 162070b44595SMel Gorman * initialized by compact_zone(). The first failure will use 162170b44595SMel Gorman * the lowest PFN as the starting point for linear scanning. 
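 *
 * Illustrative trace of that fallback (made-up pfns): if earlier fast
 * searches handed out pageblocks starting at pfns 9216 and 5120,
 * update_fast_start_pfn() has recorded the lower of the two, so when
 * fast_find_migrateblock() eventually fails it calls reinit_migrate_pfn()
 * and the linear scan below resumes from pfn 5120 rather than from
 * wherever the scanner happened to be.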
1622edc2ca61SVlastimil Babka */ 162370b44595SMel Gorman low_pfn = fast_find_migrateblock(cc); 162406b6640aSVlastimil Babka block_start_pfn = pageblock_start_pfn(low_pfn); 1625e1409c32SJoonsoo Kim if (block_start_pfn < zone->zone_start_pfn) 1626e1409c32SJoonsoo Kim block_start_pfn = zone->zone_start_pfn; 1627ff9543fdSMichal Nazarewicz 162870b44595SMel Gorman /* 162970b44595SMel Gorman * fast_find_migrateblock marks a pageblock skipped so to avoid 163070b44595SMel Gorman * the isolation_suitable check below, check whether the fast 163170b44595SMel Gorman * search was successful. 163270b44595SMel Gorman */ 163370b44595SMel Gorman fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; 163470b44595SMel Gorman 1635ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 163606b6640aSVlastimil Babka block_end_pfn = pageblock_end_pfn(low_pfn); 1637ff9543fdSMichal Nazarewicz 1638edc2ca61SVlastimil Babka /* 1639edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1640edc2ca61SVlastimil Babka * Do not cross the free scanner. 1641edc2ca61SVlastimil Babka */ 1642e1409c32SJoonsoo Kim for (; block_end_pfn <= cc->free_pfn; 164370b44595SMel Gorman fast_find_block = false, 1644e1409c32SJoonsoo Kim low_pfn = block_end_pfn, 1645e1409c32SJoonsoo Kim block_start_pfn = block_end_pfn, 1646e1409c32SJoonsoo Kim block_end_pfn += pageblock_nr_pages) { 1647edc2ca61SVlastimil Babka 1648edc2ca61SVlastimil Babka /* 1649edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1650edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1651cb810ad2SMel Gorman * need to schedule. 1652edc2ca61SVlastimil Babka */ 1653cb810ad2SMel Gorman if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))) 1654*cf66f070SMel Gorman cond_resched(); 1655edc2ca61SVlastimil Babka 1656e1409c32SJoonsoo Kim page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 1657e1409c32SJoonsoo Kim zone); 16587d49d886SVlastimil Babka if (!page) 1659edc2ca61SVlastimil Babka continue; 1660edc2ca61SVlastimil Babka 1661e380bebeSMel Gorman /* 1662e380bebeSMel Gorman * If isolation recently failed, do not retry. Only check the 1663e380bebeSMel Gorman * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock 1664e380bebeSMel Gorman * to be visited multiple times. Assume skip was checked 1665e380bebeSMel Gorman * before making it "skip" so other compaction instances do 1666e380bebeSMel Gorman * not scan the same block. 1667e380bebeSMel Gorman */ 1668e380bebeSMel Gorman if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && 1669e380bebeSMel Gorman !fast_find_block && !isolation_suitable(cc, page)) 1670edc2ca61SVlastimil Babka continue; 1671edc2ca61SVlastimil Babka 1672edc2ca61SVlastimil Babka /* 16739bebefd5SMel Gorman * For async compaction, also only scan in MOVABLE blocks 16749bebefd5SMel Gorman * without huge pages. Async compaction is optimistic to see 16759bebefd5SMel Gorman * if the minimum amount of work satisfies the allocation. 16769bebefd5SMel Gorman * The cached PFN is updated as it's possible that all 16779bebefd5SMel Gorman * remaining blocks between source and target are unsuitable 16789bebefd5SMel Gorman * and the compaction scanners fail to meet. 
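 *
 * Concretely, as a recap of suitable_migration_source(): an async
 * direct request for an unmovable allocation only accepts
 * MIGRATE_UNMOVABLE pageblocks as sources (likewise reclaimable
 * requests only accept reclaimable blocks), an async direct request
 * for a movable allocation accepts MOVABLE and CMA blocks, while
 * sync compaction and kcompactd accept any block that is not a
 * persistent skip.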
1679edc2ca61SVlastimil Babka */ 16809bebefd5SMel Gorman if (!suitable_migration_source(cc, page)) { 16819bebefd5SMel Gorman update_cached_migrate(cc, block_end_pfn); 1682edc2ca61SVlastimil Babka continue; 16839bebefd5SMel Gorman } 1684ff9543fdSMichal Nazarewicz 1685ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1686e1409c32SJoonsoo Kim low_pfn = isolate_migratepages_block(cc, low_pfn, 1687e1409c32SJoonsoo Kim block_end_pfn, isolate_mode); 1688edc2ca61SVlastimil Babka 1689cb2dcaf0SMel Gorman if (!low_pfn) 1690ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1691ff9543fdSMichal Nazarewicz 1692edc2ca61SVlastimil Babka /* 1693edc2ca61SVlastimil Babka * Either we isolated something and proceed with migration. Or 1694edc2ca61SVlastimil Babka * we failed and compact_zone should decide if we should 1695edc2ca61SVlastimil Babka * continue or not. 1696edc2ca61SVlastimil Babka */ 1697edc2ca61SVlastimil Babka break; 1698edc2ca61SVlastimil Babka } 1699edc2ca61SVlastimil Babka 1700f2849aa0SVlastimil Babka /* Record where migration scanner will be restarted. */ 1701f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1702ff9543fdSMichal Nazarewicz 1703edc2ca61SVlastimil Babka return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1704ff9543fdSMichal Nazarewicz } 1705ff9543fdSMichal Nazarewicz 170621c527a3SYaowei Bai /* 170721c527a3SYaowei Bai * order == -1 is expected when compacting via 170821c527a3SYaowei Bai * /proc/sys/vm/compact_memory 170921c527a3SYaowei Bai */ 171021c527a3SYaowei Bai static inline bool is_via_compact_memory(int order) 171121c527a3SYaowei Bai { 171221c527a3SYaowei Bai return order == -1; 171321c527a3SYaowei Bai } 171421c527a3SYaowei Bai 171540cacbcbSMel Gorman static enum compact_result __compact_finished(struct compact_control *cc) 1716748446bbSMel Gorman { 17178fb74b9fSMel Gorman unsigned int order; 1718d39773a0SVlastimil Babka const int migratetype = cc->migratetype; 1719cb2dcaf0SMel Gorman int ret; 1720748446bbSMel Gorman 1721753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1722f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 172355b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 172440cacbcbSMel Gorman reset_cached_positions(cc->zone); 172555b7c4c9SVlastimil Babka 172662997027SMel Gorman /* 172762997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 1728accf6242SVlastimil Babka * by kswapd when it goes to sleep. kcompactd does not set the 172962997027SMel Gorman * flag itself as the decision to be clear should be directly 173062997027SMel Gorman * based on an allocation request. 173162997027SMel Gorman */ 1732accf6242SVlastimil Babka if (cc->direct_compaction) 173340cacbcbSMel Gorman cc->zone->compact_blockskip_flush = true; 173462997027SMel Gorman 1735c8f7de0bSMichal Hocko if (cc->whole_zone) 1736748446bbSMel Gorman return COMPACT_COMPLETE; 1737c8f7de0bSMichal Hocko else 1738c8f7de0bSMichal Hocko return COMPACT_PARTIAL_SKIPPED; 1739bb13ffebSMel Gorman } 1740748446bbSMel Gorman 174121c527a3SYaowei Bai if (is_via_compact_memory(cc->order)) 174256de7263SMel Gorman return COMPACT_CONTINUE; 174356de7263SMel Gorman 1744baf6a9a1SVlastimil Babka /* 1745efe771c7SMel Gorman * Always finish scanning a pageblock to reduce the possibility of 1746efe771c7SMel Gorman * fallbacks in the future. This is particularly important when 1747efe771c7SMel Gorman * migration source is unmovable/reclaimable but it's not worth 1748efe771c7SMel Gorman * special casing. 
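 *
 * Example with illustrative numbers (512-page pageblocks): if
 * cc->migrate_pfn is 262700 the scanner is 44 pages into a pageblock,
 * so the check below keeps compaction running even if a suitable free
 * page already exists, and the free-page checks further down are only
 * reached once migrate_pfn hits the 263168 boundary.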
1749baf6a9a1SVlastimil Babka */ 1750efe771c7SMel Gorman if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) 1751baf6a9a1SVlastimil Babka return COMPACT_CONTINUE; 1752baf6a9a1SVlastimil Babka 175356de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 1754cb2dcaf0SMel Gorman ret = COMPACT_NO_SUITABLE_PAGE; 175556de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 175640cacbcbSMel Gorman struct free_area *area = &cc->zone->free_area[order]; 17572149cdaeSJoonsoo Kim bool can_steal; 17588fb74b9fSMel Gorman 175956de7263SMel Gorman /* Job done if page is free of the right migratetype */ 17606d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype])) 1761cf378319SVlastimil Babka return COMPACT_SUCCESS; 176256de7263SMel Gorman 17632149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 17642149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 17652149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 17662149cdaeSJoonsoo Kim !list_empty(&area->free_list[MIGRATE_CMA])) 1767cf378319SVlastimil Babka return COMPACT_SUCCESS; 17682149cdaeSJoonsoo Kim #endif 17692149cdaeSJoonsoo Kim /* 17702149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 17712149cdaeSJoonsoo Kim * other migratetype buddy lists. 17722149cdaeSJoonsoo Kim */ 17732149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 1774baf6a9a1SVlastimil Babka true, &can_steal) != -1) { 1775baf6a9a1SVlastimil Babka 1776baf6a9a1SVlastimil Babka /* movable pages are OK in any pageblock */ 1777baf6a9a1SVlastimil Babka if (migratetype == MIGRATE_MOVABLE) 1778cf378319SVlastimil Babka return COMPACT_SUCCESS; 1779baf6a9a1SVlastimil Babka 1780baf6a9a1SVlastimil Babka /* 1781baf6a9a1SVlastimil Babka * We are stealing for a non-movable allocation. Make 1782baf6a9a1SVlastimil Babka * sure we finish compacting the current pageblock 1783baf6a9a1SVlastimil Babka * first so it is as free as possible and we won't 1784baf6a9a1SVlastimil Babka * have to steal another one soon. This only applies 1785baf6a9a1SVlastimil Babka * to sync compaction, as async compaction operates 1786baf6a9a1SVlastimil Babka * on pageblocks of the same migratetype. 
1787baf6a9a1SVlastimil Babka */ 1788baf6a9a1SVlastimil Babka if (cc->mode == MIGRATE_ASYNC || 1789baf6a9a1SVlastimil Babka IS_ALIGNED(cc->migrate_pfn, 1790baf6a9a1SVlastimil Babka pageblock_nr_pages)) { 1791baf6a9a1SVlastimil Babka return COMPACT_SUCCESS; 1792baf6a9a1SVlastimil Babka } 1793baf6a9a1SVlastimil Babka 1794cb2dcaf0SMel Gorman ret = COMPACT_CONTINUE; 1795cb2dcaf0SMel Gorman break; 1796baf6a9a1SVlastimil Babka } 179756de7263SMel Gorman } 179856de7263SMel Gorman 1799cb2dcaf0SMel Gorman if (cc->contended || fatal_signal_pending(current)) 1800cb2dcaf0SMel Gorman ret = COMPACT_CONTENDED; 1801cb2dcaf0SMel Gorman 1802cb2dcaf0SMel Gorman return ret; 1803837d026dSJoonsoo Kim } 1804837d026dSJoonsoo Kim 180540cacbcbSMel Gorman static enum compact_result compact_finished(struct compact_control *cc) 1806837d026dSJoonsoo Kim { 1807837d026dSJoonsoo Kim int ret; 1808837d026dSJoonsoo Kim 180940cacbcbSMel Gorman ret = __compact_finished(cc); 181040cacbcbSMel Gorman trace_mm_compaction_finished(cc->zone, cc->order, ret); 1811837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1812837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1813837d026dSJoonsoo Kim 1814837d026dSJoonsoo Kim return ret; 1815748446bbSMel Gorman } 1816748446bbSMel Gorman 18173e7d3449SMel Gorman /* 18183e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 18193e7d3449SMel Gorman * Returns 18203e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 1821cf378319SVlastimil Babka * COMPACT_SUCCESS - If the allocation would succeed without compaction 18223e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 18233e7d3449SMel Gorman */ 1824ea7ab982SMichal Hocko static enum compact_result __compaction_suitable(struct zone *zone, int order, 1825c603844bSMel Gorman unsigned int alloc_flags, 182686a294a8SMichal Hocko int classzone_idx, 182786a294a8SMichal Hocko unsigned long wmark_target) 18283e7d3449SMel Gorman { 18293e7d3449SMel Gorman unsigned long watermark; 18303e7d3449SMel Gorman 183121c527a3SYaowei Bai if (is_via_compact_memory(order)) 18323957c776SMichal Hocko return COMPACT_CONTINUE; 18333957c776SMichal Hocko 1834a9214443SMel Gorman watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 1835ebff3980SVlastimil Babka /* 1836ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1837ebff3980SVlastimil Babka * should be no need for compaction at all. 1838ebff3980SVlastimil Babka */ 1839ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1840ebff3980SVlastimil Babka alloc_flags)) 1841cf378319SVlastimil Babka return COMPACT_SUCCESS; 1842ebff3980SVlastimil Babka 18433957c776SMichal Hocko /* 18449861a62cSVlastimil Babka * Watermarks for order-0 must be met for compaction to be able to 1845984fdba6SVlastimil Babka * isolate free pages for migration targets. This means that the 1846984fdba6SVlastimil Babka * watermark and alloc_flags have to match, or be more pessimistic than 1847984fdba6SVlastimil Babka * the check in __isolate_free_page(). We don't use the direct 1848984fdba6SVlastimil Babka * compactor's alloc_flags, as they are not relevant for freepage 1849984fdba6SVlastimil Babka * isolation. We however do use the direct compactor's classzone_idx to 1850984fdba6SVlastimil Babka * skip over zones where lowmem reserves would prevent allocation even 1851984fdba6SVlastimil Babka * if compaction succeeds. 
18528348faf9SVlastimil Babka * For costly orders, we require low watermark instead of min for 18538348faf9SVlastimil Babka * compaction to proceed to increase its chances. 1854d883c6cfSJoonsoo Kim * ALLOC_CMA is used, as pages in CMA pageblocks are considered 1855d883c6cfSJoonsoo Kim * suitable migration targets 18563e7d3449SMel Gorman */ 18578348faf9SVlastimil Babka watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 18588348faf9SVlastimil Babka low_wmark_pages(zone) : min_wmark_pages(zone); 18598348faf9SVlastimil Babka watermark += compact_gap(order); 186086a294a8SMichal Hocko if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx, 1861d883c6cfSJoonsoo Kim ALLOC_CMA, wmark_target)) 18623e7d3449SMel Gorman return COMPACT_SKIPPED; 18633e7d3449SMel Gorman 1864cc5c9f09SVlastimil Babka return COMPACT_CONTINUE; 1865cc5c9f09SVlastimil Babka } 1866cc5c9f09SVlastimil Babka 1867cc5c9f09SVlastimil Babka enum compact_result compaction_suitable(struct zone *zone, int order, 1868cc5c9f09SVlastimil Babka unsigned int alloc_flags, 1869cc5c9f09SVlastimil Babka int classzone_idx) 1870cc5c9f09SVlastimil Babka { 1871cc5c9f09SVlastimil Babka enum compact_result ret; 1872cc5c9f09SVlastimil Babka int fragindex; 1873cc5c9f09SVlastimil Babka 1874cc5c9f09SVlastimil Babka ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx, 1875cc5c9f09SVlastimil Babka zone_page_state(zone, NR_FREE_PAGES)); 18763e7d3449SMel Gorman /* 18773e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 18783e7d3449SMel Gorman * low memory or external fragmentation 18793e7d3449SMel Gorman * 1880ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 1881ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 18823e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 18833e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 18843e7d3449SMel Gorman * 188520311420SVlastimil Babka * Only compact if a failure would be due to fragmentation. Also 188620311420SVlastimil Babka * ignore fragindex for non-costly orders where the alternative to 188720311420SVlastimil Babka * a successful reclaim/compaction is OOM. Fragindex and the 188820311420SVlastimil Babka * vm.extfrag_threshold sysctl is meant as a heuristic to prevent 188920311420SVlastimil Babka * excessive compaction for costly orders, but it should not be at the 189020311420SVlastimil Babka * expense of system stability. 
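 *
 * Worked example with illustrative numbers: for an order-9 request,
 * __compaction_suitable() above demands low_wmark_pages(zone) plus
 * compact_gap(9) = 1024 order-0 pages (roughly twice the request size)
 * before compaction is attempted; if that passes but
 * fragmentation_index() reports, say, 400, which is below the default
 * sysctl_extfrag_threshold of 500, the zone is reported as
 * COMPACT_SKIPPED so reclaim is tried instead of compaction.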
18913e7d3449SMel Gorman */ 189220311420SVlastimil Babka if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 18933e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 18943e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1895cc5c9f09SVlastimil Babka ret = COMPACT_NOT_SUITABLE_ZONE; 18963e7d3449SMel Gorman } 18973e7d3449SMel Gorman 1898837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 1899837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 1900837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 1901837d026dSJoonsoo Kim 1902837d026dSJoonsoo Kim return ret; 1903837d026dSJoonsoo Kim } 1904837d026dSJoonsoo Kim 190586a294a8SMichal Hocko bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 190686a294a8SMichal Hocko int alloc_flags) 190786a294a8SMichal Hocko { 190886a294a8SMichal Hocko struct zone *zone; 190986a294a8SMichal Hocko struct zoneref *z; 191086a294a8SMichal Hocko 191186a294a8SMichal Hocko /* 191286a294a8SMichal Hocko * Make sure at least one zone would pass __compaction_suitable if we continue 191386a294a8SMichal Hocko * retrying the reclaim. 191486a294a8SMichal Hocko */ 191586a294a8SMichal Hocko for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 191686a294a8SMichal Hocko ac->nodemask) { 191786a294a8SMichal Hocko unsigned long available; 191886a294a8SMichal Hocko enum compact_result compact_result; 191986a294a8SMichal Hocko 192086a294a8SMichal Hocko /* 192186a294a8SMichal Hocko * Do not consider all the reclaimable memory because we do not 192286a294a8SMichal Hocko * want to trash just for a single high order allocation which 192386a294a8SMichal Hocko * is even not guaranteed to appear even if __compaction_suitable 192486a294a8SMichal Hocko * is happy about the watermark check. 
192586a294a8SMichal Hocko */ 19265a1c84b4SMel Gorman available = zone_reclaimable_pages(zone) / order; 192786a294a8SMichal Hocko available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 192886a294a8SMichal Hocko compact_result = __compaction_suitable(zone, order, alloc_flags, 192986a294a8SMichal Hocko ac_classzone_idx(ac), available); 1930cc5c9f09SVlastimil Babka if (compact_result != COMPACT_SKIPPED) 193186a294a8SMichal Hocko return true; 193286a294a8SMichal Hocko } 193386a294a8SMichal Hocko 193486a294a8SMichal Hocko return false; 193586a294a8SMichal Hocko } 193686a294a8SMichal Hocko 193740cacbcbSMel Gorman static enum compact_result compact_zone(struct compact_control *cc) 1938748446bbSMel Gorman { 1939ea7ab982SMichal Hocko enum compact_result ret; 194040cacbcbSMel Gorman unsigned long start_pfn = cc->zone->zone_start_pfn; 194140cacbcbSMel Gorman unsigned long end_pfn = zone_end_pfn(cc->zone); 1942566e54e1SMel Gorman unsigned long last_migrated_pfn; 1943e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 19448854c55fSMel Gorman bool update_cached; 1945748446bbSMel Gorman 1946d39773a0SVlastimil Babka cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); 194740cacbcbSMel Gorman ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 1948ebff3980SVlastimil Babka cc->classzone_idx); 19493e7d3449SMel Gorman /* Compaction is likely to fail */ 1950cf378319SVlastimil Babka if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 19513e7d3449SMel Gorman return ret; 1952c46649deSMichal Hocko 1953c46649deSMichal Hocko /* huh, compaction_suitable is returning something unexpected */ 1954c46649deSMichal Hocko VM_BUG_ON(ret != COMPACT_CONTINUE); 19553e7d3449SMel Gorman 1956c89511abSMel Gorman /* 1957d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1958accf6242SVlastimil Babka * is about to be retried after being deferred. 1959d3132e4bSVlastimil Babka */ 196040cacbcbSMel Gorman if (compaction_restarting(cc->zone, cc->order)) 196140cacbcbSMel Gorman __reset_isolation_suitable(cc->zone); 1962d3132e4bSVlastimil Babka 1963d3132e4bSVlastimil Babka /* 1964c89511abSMel Gorman * Setup to move all movable pages to the end of the zone. Used cached 196506ed2998SVlastimil Babka * information on where the scanners should start (unless we explicitly 196606ed2998SVlastimil Babka * want to compact the whole zone), but check that it is initialised 196706ed2998SVlastimil Babka * by ensuring the values are within zone boundaries. 
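 *
 * Illustrative numbers: in a zone spanning pfns 4096-266240, a stale
 * compact_cached_free_pfn of 0 fails the range check below and is
 * re-initialised to pageblock_start_pfn(266239) == 265728, while an
 * out-of-range cached migrate pfn is pulled back to the zone start
 * at 4096.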
1968c89511abSMel Gorman */ 196970b44595SMel Gorman cc->fast_start_pfn = 0; 197006ed2998SVlastimil Babka if (cc->whole_zone) { 197106ed2998SVlastimil Babka cc->migrate_pfn = start_pfn; 197206ed2998SVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 197306ed2998SVlastimil Babka } else { 197440cacbcbSMel Gorman cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 197540cacbcbSMel Gorman cc->free_pfn = cc->zone->compact_cached_free_pfn; 1976623446e4SJoonsoo Kim if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 197706b6640aSVlastimil Babka cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 197840cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = cc->free_pfn; 1979c89511abSMel Gorman } 1980623446e4SJoonsoo Kim if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 1981c89511abSMel Gorman cc->migrate_pfn = start_pfn; 198240cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 198340cacbcbSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1984c89511abSMel Gorman } 1985c8f7de0bSMichal Hocko 1986c8f7de0bSMichal Hocko if (cc->migrate_pfn == start_pfn) 1987c8f7de0bSMichal Hocko cc->whole_zone = true; 198806ed2998SVlastimil Babka } 1989c8f7de0bSMichal Hocko 1990566e54e1SMel Gorman last_migrated_pfn = 0; 1991748446bbSMel Gorman 19928854c55fSMel Gorman /* 19938854c55fSMel Gorman * Migrate has separate cached PFNs for ASYNC and SYNC* migration on 19948854c55fSMel Gorman * the basis that some migrations will fail in ASYNC mode. However, 19958854c55fSMel Gorman * if the cached PFNs match and pageblocks are skipped due to having 19968854c55fSMel Gorman * no isolation candidates, then the sync state does not matter. 19978854c55fSMel Gorman * Until a pageblock with isolation candidates is found, keep the 19988854c55fSMel Gorman * cached PFNs in sync to avoid revisiting the same blocks. 19998854c55fSMel Gorman */ 20008854c55fSMel Gorman update_cached = !sync && 20018854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 20028854c55fSMel Gorman 200316c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 200416c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 20050eb927c0SMel Gorman 2006748446bbSMel Gorman migrate_prep_local(); 2007748446bbSMel Gorman 200840cacbcbSMel Gorman while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 20099d502c1cSMinchan Kim int err; 2010566e54e1SMel Gorman unsigned long start_pfn = cc->migrate_pfn; 2011748446bbSMel Gorman 2012804d3121SMel Gorman /* 2013804d3121SMel Gorman * Avoid multiple rescans which can happen if a page cannot be 2014804d3121SMel Gorman * isolated (dirty/writeback in async mode) or if the migrated 2015804d3121SMel Gorman * pages are being allocated before the pageblock is cleared. 2016804d3121SMel Gorman * The first rescan will capture the entire pageblock for 2017804d3121SMel Gorman * migration. If it fails, it'll be marked skip and scanning 2018804d3121SMel Gorman * will proceed as normal. 
2019804d3121SMel Gorman */ 2020804d3121SMel Gorman cc->rescan = false; 2021804d3121SMel Gorman if (pageblock_start_pfn(last_migrated_pfn) == 2022804d3121SMel Gorman pageblock_start_pfn(start_pfn)) { 2023804d3121SMel Gorman cc->rescan = true; 2024804d3121SMel Gorman } 2025804d3121SMel Gorman 202640cacbcbSMel Gorman switch (isolate_migratepages(cc->zone, cc)) { 2027f9e35b3bSMel Gorman case ISOLATE_ABORT: 20282d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 20295733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 2030e64c5237SShaohua Li cc->nr_migratepages = 0; 2031566e54e1SMel Gorman last_migrated_pfn = 0; 2032f9e35b3bSMel Gorman goto out; 2033f9e35b3bSMel Gorman case ISOLATE_NONE: 20348854c55fSMel Gorman if (update_cached) { 20358854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[1] = 20368854c55fSMel Gorman cc->zone->compact_cached_migrate_pfn[0]; 20378854c55fSMel Gorman } 20388854c55fSMel Gorman 2039fdaf7f5cSVlastimil Babka /* 2040fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 2041fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 2042fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 2043fdaf7f5cSVlastimil Babka */ 2044fdaf7f5cSVlastimil Babka goto check_drain; 2045f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 20468854c55fSMel Gorman update_cached = false; 2047566e54e1SMel Gorman last_migrated_pfn = start_pfn; 2048f9e35b3bSMel Gorman ; 2049f9e35b3bSMel Gorman } 2050748446bbSMel Gorman 2051d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 2052e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 20537b2a2d4aSMel Gorman MR_COMPACTION); 2054748446bbSMel Gorman 2055f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 2056f8c9301fSVlastimil Babka &cc->migratepages); 2057748446bbSMel Gorman 2058f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 2059f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 20609d502c1cSMinchan Kim if (err) { 20615733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 20627ed695e0SVlastimil Babka /* 20637ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 20647ed695e0SVlastimil Babka * and we want compact_finished() to detect it 20657ed695e0SVlastimil Babka */ 2066f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 20672d1e1041SVlastimil Babka ret = COMPACT_CONTENDED; 20684bf2bba3SDavid Rientjes goto out; 2069748446bbSMel Gorman } 2070fdd048e1SVlastimil Babka /* 2071fdd048e1SVlastimil Babka * We failed to migrate at least one page in the current 2072fdd048e1SVlastimil Babka * order-aligned block, so skip the rest of it. 2073fdd048e1SVlastimil Babka */ 2074fdd048e1SVlastimil Babka if (cc->direct_compaction && 2075fdd048e1SVlastimil Babka (cc->mode == MIGRATE_ASYNC)) { 2076fdd048e1SVlastimil Babka cc->migrate_pfn = block_end_pfn( 2077fdd048e1SVlastimil Babka cc->migrate_pfn - 1, cc->order); 2078fdd048e1SVlastimil Babka /* Draining pcplists is useless in this case */ 2079566e54e1SMel Gorman last_migrated_pfn = 0; 2080fdd048e1SVlastimil Babka } 20814bf2bba3SDavid Rientjes } 2082fdaf7f5cSVlastimil Babka 2083fdaf7f5cSVlastimil Babka check_drain: 2084fdaf7f5cSVlastimil Babka /* 2085fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 2086fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? 
If yes, 2087fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 2088fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 2089fdaf7f5cSVlastimil Babka * would succeed. 2090fdaf7f5cSVlastimil Babka */ 2091566e54e1SMel Gorman if (cc->order > 0 && last_migrated_pfn) { 2092fdaf7f5cSVlastimil Babka int cpu; 2093fdaf7f5cSVlastimil Babka unsigned long current_block_start = 209406b6640aSVlastimil Babka block_start_pfn(cc->migrate_pfn, cc->order); 2095fdaf7f5cSVlastimil Babka 2096566e54e1SMel Gorman if (last_migrated_pfn < current_block_start) { 2097fdaf7f5cSVlastimil Babka cpu = get_cpu(); 2098fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 209940cacbcbSMel Gorman drain_local_pages(cc->zone); 2100fdaf7f5cSVlastimil Babka put_cpu(); 2101fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 2102566e54e1SMel Gorman last_migrated_pfn = 0; 2103fdaf7f5cSVlastimil Babka } 2104fdaf7f5cSVlastimil Babka } 2105fdaf7f5cSVlastimil Babka 2106748446bbSMel Gorman } 2107748446bbSMel Gorman 2108f9e35b3bSMel Gorman out: 21096bace090SVlastimil Babka /* 21106bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 21116bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 21126bace090SVlastimil Babka */ 21136bace090SVlastimil Babka if (cc->nr_freepages > 0) { 21146bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 21156bace090SVlastimil Babka 21166bace090SVlastimil Babka cc->nr_freepages = 0; 21176bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 21186bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 211906b6640aSVlastimil Babka free_pfn = pageblock_start_pfn(free_pfn); 21206bace090SVlastimil Babka /* 21216bace090SVlastimil Babka * Only go back, not forward. The cached pfn might have been 21226bace090SVlastimil Babka * already reset to zone end in compact_finished() 21236bace090SVlastimil Babka */ 212440cacbcbSMel Gorman if (free_pfn > cc->zone->compact_cached_free_pfn) 212540cacbcbSMel Gorman cc->zone->compact_cached_free_pfn = free_pfn; 21266bace090SVlastimil Babka } 2127748446bbSMel Gorman 21287f354a54SDavid Rientjes count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 21297f354a54SDavid Rientjes count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 21307f354a54SDavid Rientjes 213116c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 213216c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 21330eb927c0SMel Gorman 2134748446bbSMel Gorman return ret; 2135748446bbSMel Gorman } 213676ab0f53SMel Gorman 2137ea7ab982SMichal Hocko static enum compact_result compact_zone_order(struct zone *zone, int order, 2138c3486f53SVlastimil Babka gfp_t gfp_mask, enum compact_priority prio, 2139c603844bSMel Gorman unsigned int alloc_flags, int classzone_idx) 214056de7263SMel Gorman { 2141ea7ab982SMichal Hocko enum compact_result ret; 214256de7263SMel Gorman struct compact_control cc = { 214356de7263SMel Gorman .nr_freepages = 0, 214456de7263SMel Gorman .nr_migratepages = 0, 21457f354a54SDavid Rientjes .total_migrate_scanned = 0, 21467f354a54SDavid Rientjes .total_free_scanned = 0, 214756de7263SMel Gorman .order = order, 21486d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 214956de7263SMel Gorman .zone = zone, 2150a5508cd8SVlastimil Babka .mode = (prio == COMPACT_PRIO_ASYNC) ? 
2151a5508cd8SVlastimil Babka MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2152ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 2153ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 2154accf6242SVlastimil Babka .direct_compaction = true, 2155a8e025e5SVlastimil Babka .whole_zone = (prio == MIN_COMPACT_PRIORITY), 21569f7e3387SVlastimil Babka .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 21579f7e3387SVlastimil Babka .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 215856de7263SMel Gorman }; 215956de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 216056de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 216156de7263SMel Gorman 216240cacbcbSMel Gorman ret = compact_zone(&cc); 2163e64c5237SShaohua Li 2164e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 2165e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 2166e64c5237SShaohua Li 2167e64c5237SShaohua Li return ret; 216856de7263SMel Gorman } 216956de7263SMel Gorman 21705e771905SMel Gorman int sysctl_extfrag_threshold = 500; 21715e771905SMel Gorman 217256de7263SMel Gorman /** 217356de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 217456de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 21751a6d53a1SVlastimil Babka * @order: The order of the current allocation 21761a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 21771a6d53a1SVlastimil Babka * @ac: The context of current allocation 2178112d2d29SYang Shi * @prio: Determines how hard direct compaction should try to succeed 217956de7263SMel Gorman * 218056de7263SMel Gorman * This is the main entry point for direct page compaction. 218156de7263SMel Gorman */ 2182ea7ab982SMichal Hocko enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2183c603844bSMel Gorman unsigned int alloc_flags, const struct alloc_context *ac, 2184c3486f53SVlastimil Babka enum compact_priority prio) 218556de7263SMel Gorman { 218656de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 218756de7263SMel Gorman struct zoneref *z; 218856de7263SMel Gorman struct zone *zone; 21891d4746d3SMichal Hocko enum compact_result rc = COMPACT_SKIPPED; 219056de7263SMel Gorman 219173e64c51SMichal Hocko /* 219273e64c51SMichal Hocko * Check if the GFP flags allow compaction - GFP_NOIO is really 219373e64c51SMichal Hocko * tricky context because the migration might require IO 219473e64c51SMichal Hocko */ 219573e64c51SMichal Hocko if (!may_perform_io) 219653853e2dSVlastimil Babka return COMPACT_SKIPPED; 219756de7263SMel Gorman 2198a5508cd8SVlastimil Babka trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2199837d026dSJoonsoo Kim 220056de7263SMel Gorman /* Compact each zone in the list */ 22011a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 22021a6d53a1SVlastimil Babka ac->nodemask) { 2203ea7ab982SMichal Hocko enum compact_result status; 220456de7263SMel Gorman 2205a8e025e5SVlastimil Babka if (prio > MIN_COMPACT_PRIORITY 2206a8e025e5SVlastimil Babka && compaction_deferred(zone, order)) { 22071d4746d3SMichal Hocko rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 220853853e2dSVlastimil Babka continue; 22091d4746d3SMichal Hocko } 221053853e2dSVlastimil Babka 2211a5508cd8SVlastimil Babka status = compact_zone_order(zone, order, gfp_mask, prio, 2212c3486f53SVlastimil Babka alloc_flags, ac_classzone_idx(ac)); 221356de7263SMel Gorman rc = max(status, rc); 221456de7263SMel Gorman 22157ceb009aSVlastimil Babka /* The allocation 
should succeed, stop compacting */ 22167ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 221753853e2dSVlastimil Babka /* 221853853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 221953853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 222053853e2dSVlastimil Babka * will repeat this with true if allocation indeed 222153853e2dSVlastimil Babka * succeeds in this zone. 222253853e2dSVlastimil Babka */ 222353853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 22241f9efdefSVlastimil Babka 2225c3486f53SVlastimil Babka break; 22261f9efdefSVlastimil Babka } 22271f9efdefSVlastimil Babka 2228a5508cd8SVlastimil Babka if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2229c3486f53SVlastimil Babka status == COMPACT_PARTIAL_SKIPPED)) 223053853e2dSVlastimil Babka /* 223153853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 223253853e2dSVlastimil Babka * so we defer compaction there. If it ends up 223353853e2dSVlastimil Babka * succeeding after all, it will be reset. 223453853e2dSVlastimil Babka */ 223553853e2dSVlastimil Babka defer_compaction(zone, order); 22361f9efdefSVlastimil Babka 22371f9efdefSVlastimil Babka /* 22381f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 22391f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 2240c3486f53SVlastimil Babka * case do not try further zones 22411f9efdefSVlastimil Babka */ 2242c3486f53SVlastimil Babka if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2243c3486f53SVlastimil Babka || fatal_signal_pending(current)) 22441f9efdefSVlastimil Babka break; 22451f9efdefSVlastimil Babka } 22461f9efdefSVlastimil Babka 224756de7263SMel Gorman return rc; 224856de7263SMel Gorman } 224956de7263SMel Gorman 225056de7263SMel Gorman 225176ab0f53SMel Gorman /* Compact all zones within a node */ 22527103f16dSAndrew Morton static void compact_node(int nid) 22537be62de9SRik van Riel { 2254791cae96SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2255791cae96SVlastimil Babka int zoneid; 2256791cae96SVlastimil Babka struct zone *zone; 22577be62de9SRik van Riel struct compact_control cc = { 22587be62de9SRik van Riel .order = -1, 22597f354a54SDavid Rientjes .total_migrate_scanned = 0, 22607f354a54SDavid Rientjes .total_free_scanned = 0, 2261e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 226291ca9186SDavid Rientjes .ignore_skip_hint = true, 226306ed2998SVlastimil Babka .whole_zone = true, 226473e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 22657be62de9SRik van Riel }; 22667be62de9SRik van Riel 2267791cae96SVlastimil Babka 2268791cae96SVlastimil Babka for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2269791cae96SVlastimil Babka 2270791cae96SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2271791cae96SVlastimil Babka if (!populated_zone(zone)) 2272791cae96SVlastimil Babka continue; 2273791cae96SVlastimil Babka 2274791cae96SVlastimil Babka cc.nr_freepages = 0; 2275791cae96SVlastimil Babka cc.nr_migratepages = 0; 2276791cae96SVlastimil Babka cc.zone = zone; 2277791cae96SVlastimil Babka INIT_LIST_HEAD(&cc.freepages); 2278791cae96SVlastimil Babka INIT_LIST_HEAD(&cc.migratepages); 2279791cae96SVlastimil Babka 228040cacbcbSMel Gorman compact_zone(&cc); 2281791cae96SVlastimil Babka 2282791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2283791cae96SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2284791cae96SVlastimil Babka } 22857be62de9SRik van Riel } 22867be62de9SRik van Riel 
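/*
 * Note (added for clarity): unlike compact_zone_order() above, which serves
 * direct compaction and scales its effort with the requested priority,
 * compact_node() is the manual whole-node path used by the sysctl and sysfs
 * triggers below. It walks every populated zone with order == -1,
 * MIGRATE_SYNC, whole_zone == true and the pageblock skip hints ignored,
 * and it does not consult compaction_deferred().
 */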
228776ab0f53SMel Gorman /* Compact all nodes in the system */ 22887964c06dSJason Liu static void compact_nodes(void) 228976ab0f53SMel Gorman { 229076ab0f53SMel Gorman int nid; 229176ab0f53SMel Gorman 22928575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 22938575ec29SHugh Dickins lru_add_drain_all(); 22948575ec29SHugh Dickins 229576ab0f53SMel Gorman for_each_online_node(nid) 229676ab0f53SMel Gorman compact_node(nid); 229776ab0f53SMel Gorman } 229876ab0f53SMel Gorman 229976ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 230076ab0f53SMel Gorman int sysctl_compact_memory; 230176ab0f53SMel Gorman 2302fec4eb2cSYaowei Bai /* 2303fec4eb2cSYaowei Bai * This is the entry point for compacting all nodes via 2304fec4eb2cSYaowei Bai * /proc/sys/vm/compact_memory 2305fec4eb2cSYaowei Bai */ 230676ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 230776ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 230876ab0f53SMel Gorman { 230976ab0f53SMel Gorman if (write) 23107964c06dSJason Liu compact_nodes(); 231176ab0f53SMel Gorman 231276ab0f53SMel Gorman return 0; 231376ab0f53SMel Gorman } 2314ed4a6d7fSMel Gorman 2315ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 231674e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 231710fbcf4cSKay Sievers struct device_attribute *attr, 2318ed4a6d7fSMel Gorman const char *buf, size_t count) 2319ed4a6d7fSMel Gorman { 23208575ec29SHugh Dickins int nid = dev->id; 23218575ec29SHugh Dickins 23228575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 23238575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 23248575ec29SHugh Dickins lru_add_drain_all(); 23258575ec29SHugh Dickins 23268575ec29SHugh Dickins compact_node(nid); 23278575ec29SHugh Dickins } 2328ed4a6d7fSMel Gorman 2329ed4a6d7fSMel Gorman return count; 2330ed4a6d7fSMel Gorman } 23310825a6f9SJoe Perches static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node); 2332ed4a6d7fSMel Gorman 2333ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 2334ed4a6d7fSMel Gorman { 233510fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 2336ed4a6d7fSMel Gorman } 2337ed4a6d7fSMel Gorman 2338ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 2339ed4a6d7fSMel Gorman { 234010fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 2341ed4a6d7fSMel Gorman } 2342ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2343ff9543fdSMichal Nazarewicz 2344698b1b30SVlastimil Babka static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2345698b1b30SVlastimil Babka { 2346172400c6SVlastimil Babka return pgdat->kcompactd_max_order > 0 || kthread_should_stop(); 2347698b1b30SVlastimil Babka } 2348698b1b30SVlastimil Babka 2349698b1b30SVlastimil Babka static bool kcompactd_node_suitable(pg_data_t *pgdat) 2350698b1b30SVlastimil Babka { 2351698b1b30SVlastimil Babka int zoneid; 2352698b1b30SVlastimil Babka struct zone *zone; 2353698b1b30SVlastimil Babka enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx; 2354698b1b30SVlastimil Babka 23556cd9dc3eSChen Feng for (zoneid = 0; zoneid <= classzone_idx; zoneid++) { 2356698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2357698b1b30SVlastimil Babka 2358698b1b30SVlastimil Babka if (!populated_zone(zone)) 2359698b1b30SVlastimil Babka continue; 2360698b1b30SVlastimil Babka 2361698b1b30SVlastimil Babka if 
(compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 2362698b1b30SVlastimil Babka classzone_idx) == COMPACT_CONTINUE) 2363698b1b30SVlastimil Babka return true; 2364698b1b30SVlastimil Babka } 2365698b1b30SVlastimil Babka 2366698b1b30SVlastimil Babka return false; 2367698b1b30SVlastimil Babka } 2368698b1b30SVlastimil Babka 2369698b1b30SVlastimil Babka static void kcompactd_do_work(pg_data_t *pgdat) 2370698b1b30SVlastimil Babka { 2371698b1b30SVlastimil Babka /* 2372698b1b30SVlastimil Babka * With no special task, compact all zones so that a page of requested 2373698b1b30SVlastimil Babka * order is allocatable. 2374698b1b30SVlastimil Babka */ 2375698b1b30SVlastimil Babka int zoneid; 2376698b1b30SVlastimil Babka struct zone *zone; 2377698b1b30SVlastimil Babka struct compact_control cc = { 2378698b1b30SVlastimil Babka .order = pgdat->kcompactd_max_order, 23797f354a54SDavid Rientjes .total_migrate_scanned = 0, 23807f354a54SDavid Rientjes .total_free_scanned = 0, 2381698b1b30SVlastimil Babka .classzone_idx = pgdat->kcompactd_classzone_idx, 2382698b1b30SVlastimil Babka .mode = MIGRATE_SYNC_LIGHT, 2383a0647dc9SDavid Rientjes .ignore_skip_hint = false, 238473e64c51SMichal Hocko .gfp_mask = GFP_KERNEL, 2385698b1b30SVlastimil Babka }; 2386698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2387698b1b30SVlastimil Babka cc.classzone_idx); 23887f354a54SDavid Rientjes count_compact_event(KCOMPACTD_WAKE); 2389698b1b30SVlastimil Babka 23906cd9dc3eSChen Feng for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) { 2391698b1b30SVlastimil Babka int status; 2392698b1b30SVlastimil Babka 2393698b1b30SVlastimil Babka zone = &pgdat->node_zones[zoneid]; 2394698b1b30SVlastimil Babka if (!populated_zone(zone)) 2395698b1b30SVlastimil Babka continue; 2396698b1b30SVlastimil Babka 2397698b1b30SVlastimil Babka if (compaction_deferred(zone, cc.order)) 2398698b1b30SVlastimil Babka continue; 2399698b1b30SVlastimil Babka 2400698b1b30SVlastimil Babka if (compaction_suitable(zone, cc.order, 0, zoneid) != 2401698b1b30SVlastimil Babka COMPACT_CONTINUE) 2402698b1b30SVlastimil Babka continue; 2403698b1b30SVlastimil Babka 2404698b1b30SVlastimil Babka cc.nr_freepages = 0; 2405698b1b30SVlastimil Babka cc.nr_migratepages = 0; 24067f354a54SDavid Rientjes cc.total_migrate_scanned = 0; 24077f354a54SDavid Rientjes cc.total_free_scanned = 0; 2408698b1b30SVlastimil Babka cc.zone = zone; 2409698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.freepages); 2410698b1b30SVlastimil Babka INIT_LIST_HEAD(&cc.migratepages); 2411698b1b30SVlastimil Babka 2412172400c6SVlastimil Babka if (kthread_should_stop()) 2413172400c6SVlastimil Babka return; 241440cacbcbSMel Gorman status = compact_zone(&cc); 2415698b1b30SVlastimil Babka 24167ceb009aSVlastimil Babka if (status == COMPACT_SUCCESS) { 2417698b1b30SVlastimil Babka compaction_defer_reset(zone, cc.order, false); 2418c8f7de0bSMichal Hocko } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2419698b1b30SVlastimil Babka /* 2420bc3106b2SDavid Rientjes * Buddy pages may become stranded on pcps that could 2421bc3106b2SDavid Rientjes * otherwise coalesce on the zone's free area for 2422bc3106b2SDavid Rientjes * order >= cc.order. This is ratelimited by the 2423bc3106b2SDavid Rientjes * upcoming deferral. 
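 * drain_all_pages() below flushes every CPU's pcp lists for this zone
 * back to the buddy allocator, giving those pages a chance to merge
 * into order >= cc.order free pages before compaction is deferred.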
2424bc3106b2SDavid Rientjes */ 2425bc3106b2SDavid Rientjes drain_all_pages(zone); 2426bc3106b2SDavid Rientjes 2427bc3106b2SDavid Rientjes /* 2428698b1b30SVlastimil Babka * We use sync migration mode here, so we defer like 2429698b1b30SVlastimil Babka * sync direct compaction does. 2430698b1b30SVlastimil Babka */ 2431698b1b30SVlastimil Babka defer_compaction(zone, cc.order); 2432698b1b30SVlastimil Babka } 2433698b1b30SVlastimil Babka 24347f354a54SDavid Rientjes count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 24357f354a54SDavid Rientjes cc.total_migrate_scanned); 24367f354a54SDavid Rientjes count_compact_events(KCOMPACTD_FREE_SCANNED, 24377f354a54SDavid Rientjes cc.total_free_scanned); 24387f354a54SDavid Rientjes 2439698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.freepages)); 2440698b1b30SVlastimil Babka VM_BUG_ON(!list_empty(&cc.migratepages)); 2441698b1b30SVlastimil Babka } 2442698b1b30SVlastimil Babka 2443698b1b30SVlastimil Babka /* 2444698b1b30SVlastimil Babka * Regardless of success, we are done until woken up next. But remember 2445698b1b30SVlastimil Babka * the requested order/classzone_idx in case it was higher/tighter than 2446698b1b30SVlastimil Babka * our current ones 2447698b1b30SVlastimil Babka */ 2448698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order <= cc.order) 2449698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2450698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx) 2451698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2452698b1b30SVlastimil Babka } 2453698b1b30SVlastimil Babka 2454698b1b30SVlastimil Babka void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) 2455698b1b30SVlastimil Babka { 2456698b1b30SVlastimil Babka if (!order) 2457698b1b30SVlastimil Babka return; 2458698b1b30SVlastimil Babka 2459698b1b30SVlastimil Babka if (pgdat->kcompactd_max_order < order) 2460698b1b30SVlastimil Babka pgdat->kcompactd_max_order = order; 2461698b1b30SVlastimil Babka 2462698b1b30SVlastimil Babka if (pgdat->kcompactd_classzone_idx > classzone_idx) 2463698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = classzone_idx; 2464698b1b30SVlastimil Babka 24656818600fSDavidlohr Bueso /* 24666818600fSDavidlohr Bueso * Pairs with implicit barrier in wait_event_freezable() 24676818600fSDavidlohr Bueso * such that wakeups are not missed. 24686818600fSDavidlohr Bueso */ 24696818600fSDavidlohr Bueso if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2470698b1b30SVlastimil Babka return; 2471698b1b30SVlastimil Babka 2472698b1b30SVlastimil Babka if (!kcompactd_node_suitable(pgdat)) 2473698b1b30SVlastimil Babka return; 2474698b1b30SVlastimil Babka 2475698b1b30SVlastimil Babka trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2476698b1b30SVlastimil Babka classzone_idx); 2477698b1b30SVlastimil Babka wake_up_interruptible(&pgdat->kcompactd_wait); 2478698b1b30SVlastimil Babka } 2479698b1b30SVlastimil Babka 2480698b1b30SVlastimil Babka /* 2481698b1b30SVlastimil Babka * The background compaction daemon, started as a kernel thread 2482698b1b30SVlastimil Babka * from the init process. 
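 * It sleeps on kcompactd_wait until wakeup_kcompactd() records a pending
 * order and classzone_idx for this node, then kcompactd_do_work() compacts
 * each populated, non-deferred zone up to that classzone_idx using
 * MIGRATE_SYNC_LIGHT.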
2483698b1b30SVlastimil Babka  */ 2484698b1b30SVlastimil Babka static int kcompactd(void *p) 2485698b1b30SVlastimil Babka { 2486698b1b30SVlastimil Babka pg_data_t *pgdat = (pg_data_t*)p; 2487698b1b30SVlastimil Babka struct task_struct *tsk = current; 2488698b1b30SVlastimil Babka 2489698b1b30SVlastimil Babka const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2490698b1b30SVlastimil Babka 2491698b1b30SVlastimil Babka if (!cpumask_empty(cpumask)) 2492698b1b30SVlastimil Babka set_cpus_allowed_ptr(tsk, cpumask); 2493698b1b30SVlastimil Babka 2494698b1b30SVlastimil Babka set_freezable(); 2495698b1b30SVlastimil Babka 2496698b1b30SVlastimil Babka pgdat->kcompactd_max_order = 0; 2497698b1b30SVlastimil Babka pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1; 2498698b1b30SVlastimil Babka 2499698b1b30SVlastimil Babka while (!kthread_should_stop()) { 2500eb414681SJohannes Weiner unsigned long pflags; 2501eb414681SJohannes Weiner 2502698b1b30SVlastimil Babka trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2503698b1b30SVlastimil Babka wait_event_freezable(pgdat->kcompactd_wait, 2504698b1b30SVlastimil Babka kcompactd_work_requested(pgdat)); 2505698b1b30SVlastimil Babka 2506eb414681SJohannes Weiner psi_memstall_enter(&pflags); 2507698b1b30SVlastimil Babka kcompactd_do_work(pgdat); 2508eb414681SJohannes Weiner psi_memstall_leave(&pflags); 2509698b1b30SVlastimil Babka } 2510698b1b30SVlastimil Babka 2511698b1b30SVlastimil Babka return 0; 2512698b1b30SVlastimil Babka } 2513698b1b30SVlastimil Babka 2514698b1b30SVlastimil Babka /* 2515698b1b30SVlastimil Babka * This kcompactd start function will be called by init and node-hot-add. 2516698b1b30SVlastimil Babka * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added. 2517698b1b30SVlastimil Babka */ 2518698b1b30SVlastimil Babka int kcompactd_run(int nid) 2519698b1b30SVlastimil Babka { 2520698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2521698b1b30SVlastimil Babka int ret = 0; 2522698b1b30SVlastimil Babka 2523698b1b30SVlastimil Babka if (pgdat->kcompactd) 2524698b1b30SVlastimil Babka return 0; 2525698b1b30SVlastimil Babka 2526698b1b30SVlastimil Babka pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 2527698b1b30SVlastimil Babka if (IS_ERR(pgdat->kcompactd)) { 2528698b1b30SVlastimil Babka pr_err("Failed to start kcompactd on node %d\n", nid); 2529698b1b30SVlastimil Babka ret = PTR_ERR(pgdat->kcompactd); 2530698b1b30SVlastimil Babka pgdat->kcompactd = NULL; 2531698b1b30SVlastimil Babka } 2532698b1b30SVlastimil Babka return ret; 2533698b1b30SVlastimil Babka } 2534698b1b30SVlastimil Babka 2535698b1b30SVlastimil Babka /* 2536698b1b30SVlastimil Babka * Called by memory hotplug when all memory in a node is offlined. Caller must 2537698b1b30SVlastimil Babka * hold mem_hotplug_begin/end(). 2538698b1b30SVlastimil Babka */ 2539698b1b30SVlastimil Babka void kcompactd_stop(int nid) 2540698b1b30SVlastimil Babka { 2541698b1b30SVlastimil Babka struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 2542698b1b30SVlastimil Babka 2543698b1b30SVlastimil Babka if (kcompactd) { 2544698b1b30SVlastimil Babka kthread_stop(kcompactd); 2545698b1b30SVlastimil Babka NODE_DATA(nid)->kcompactd = NULL; 2546698b1b30SVlastimil Babka } 2547698b1b30SVlastimil Babka } 2548698b1b30SVlastimil Babka 2549698b1b30SVlastimil Babka /* 2550698b1b30SVlastimil Babka * It's optimal to keep kcompactd on the same CPUs as their memory, but 2551698b1b30SVlastimil Babka * not required for correctness.
So if the last cpu in a node goes 2552698b1b30SVlastimil Babka * away, we get changed to run anywhere: as the first one comes back, 2553698b1b30SVlastimil Babka * restore their cpu bindings. 2554698b1b30SVlastimil Babka */ 2555e46b1db2SAnna-Maria Gleixner static int kcompactd_cpu_online(unsigned int cpu) 2556698b1b30SVlastimil Babka { 2557698b1b30SVlastimil Babka int nid; 2558698b1b30SVlastimil Babka 2559698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) { 2560698b1b30SVlastimil Babka pg_data_t *pgdat = NODE_DATA(nid); 2561698b1b30SVlastimil Babka const struct cpumask *mask; 2562698b1b30SVlastimil Babka 2563698b1b30SVlastimil Babka mask = cpumask_of_node(pgdat->node_id); 2564698b1b30SVlastimil Babka 2565698b1b30SVlastimil Babka if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 2566698b1b30SVlastimil Babka /* One of our CPUs online: restore mask */ 2567698b1b30SVlastimil Babka set_cpus_allowed_ptr(pgdat->kcompactd, mask); 2568698b1b30SVlastimil Babka } 2569e46b1db2SAnna-Maria Gleixner return 0; 2570698b1b30SVlastimil Babka } 2571698b1b30SVlastimil Babka 2572698b1b30SVlastimil Babka static int __init kcompactd_init(void) 2573698b1b30SVlastimil Babka { 2574698b1b30SVlastimil Babka int nid; 2575e46b1db2SAnna-Maria Gleixner int ret; 2576e46b1db2SAnna-Maria Gleixner 2577e46b1db2SAnna-Maria Gleixner ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 2578e46b1db2SAnna-Maria Gleixner "mm/compaction:online", 2579e46b1db2SAnna-Maria Gleixner kcompactd_cpu_online, NULL); 2580e46b1db2SAnna-Maria Gleixner if (ret < 0) { 2581e46b1db2SAnna-Maria Gleixner pr_err("kcompactd: failed to register hotplug callbacks.\n"); 2582e46b1db2SAnna-Maria Gleixner return ret; 2583e46b1db2SAnna-Maria Gleixner } 2584698b1b30SVlastimil Babka 2585698b1b30SVlastimil Babka for_each_node_state(nid, N_MEMORY) 2586698b1b30SVlastimil Babka kcompactd_run(nid); 2587698b1b30SVlastimil Babka return 0; 2588698b1b30SVlastimil Babka } 2589698b1b30SVlastimil Babka subsys_initcall(kcompactd_init) 2590698b1b30SVlastimil Babka 2591ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 2592
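/*
 * A minimal userspace sketch (illustration only, not part of this
 * translation unit), assuming the usual procfs/sysfs layout: writing any
 * value to /proc/sys/vm/compact_memory runs compact_nodes() on every
 * online node, and the per-node attribute registered above typically
 * appears as /sys/devices/system/node/node<N>/compact and invokes
 * sysfs_compact_node() for that node.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/proc/sys/vm/compact_memory", O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		// The written value is ignored; the write itself triggers
 *		// whole-system compaction (root required).
 *		if (write(fd, "1", 1) != 1)
 *			perror("write");
 *		close(fd);
 *		return 0;
 *	}
 */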