1748446bbSMel Gorman /* 2748446bbSMel Gorman * linux/mm/compaction.c 3748446bbSMel Gorman * 4748446bbSMel Gorman * Memory compaction for the reduction of external fragmentation. Note that 5748446bbSMel Gorman * this heavily depends upon page migration to do all the real heavy 6748446bbSMel Gorman * lifting 7748446bbSMel Gorman * 8748446bbSMel Gorman * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie> 9748446bbSMel Gorman */ 10748446bbSMel Gorman #include <linux/swap.h> 11748446bbSMel Gorman #include <linux/migrate.h> 12748446bbSMel Gorman #include <linux/compaction.h> 13748446bbSMel Gorman #include <linux/mm_inline.h> 14748446bbSMel Gorman #include <linux/backing-dev.h> 1576ab0f53SMel Gorman #include <linux/sysctl.h> 16ed4a6d7fSMel Gorman #include <linux/sysfs.h> 17bf6bddf1SRafael Aquini #include <linux/balloon_compaction.h> 18194159fbSMinchan Kim #include <linux/page-isolation.h> 19b8c73fc2SAndrey Ryabinin #include <linux/kasan.h> 20748446bbSMel Gorman #include "internal.h" 21748446bbSMel Gorman 22010fc29aSMinchan Kim #ifdef CONFIG_COMPACTION 23010fc29aSMinchan Kim static inline void count_compact_event(enum vm_event_item item) 24010fc29aSMinchan Kim { 25010fc29aSMinchan Kim count_vm_event(item); 26010fc29aSMinchan Kim } 27010fc29aSMinchan Kim 28010fc29aSMinchan Kim static inline void count_compact_events(enum vm_event_item item, long delta) 29010fc29aSMinchan Kim { 30010fc29aSMinchan Kim count_vm_events(item, delta); 31010fc29aSMinchan Kim } 32010fc29aSMinchan Kim #else 33010fc29aSMinchan Kim #define count_compact_event(item) do { } while (0) 34010fc29aSMinchan Kim #define count_compact_events(item, delta) do { } while (0) 35010fc29aSMinchan Kim #endif 36010fc29aSMinchan Kim 37ff9543fdSMichal Nazarewicz #if defined CONFIG_COMPACTION || defined CONFIG_CMA 3816c4a097SJoonsoo Kim #ifdef CONFIG_TRACEPOINTS 3916c4a097SJoonsoo Kim static const char *const compaction_status_string[] = { 4016c4a097SJoonsoo Kim "deferred", 4116c4a097SJoonsoo Kim "skipped", 4216c4a097SJoonsoo Kim "continue", 4316c4a097SJoonsoo Kim "partial", 4416c4a097SJoonsoo Kim "complete", 45837d026dSJoonsoo Kim "no_suitable_page", 46837d026dSJoonsoo Kim "not_suitable_zone", 4716c4a097SJoonsoo Kim }; 4816c4a097SJoonsoo Kim #endif 49ff9543fdSMichal Nazarewicz 50b7aba698SMel Gorman #define CREATE_TRACE_POINTS 51b7aba698SMel Gorman #include <trace/events/compaction.h> 52b7aba698SMel Gorman 53748446bbSMel Gorman static unsigned long release_freepages(struct list_head *freelist) 54748446bbSMel Gorman { 55748446bbSMel Gorman struct page *page, *next; 566bace090SVlastimil Babka unsigned long high_pfn = 0; 57748446bbSMel Gorman 58748446bbSMel Gorman list_for_each_entry_safe(page, next, freelist, lru) { 596bace090SVlastimil Babka unsigned long pfn = page_to_pfn(page); 60748446bbSMel Gorman list_del(&page->lru); 61748446bbSMel Gorman __free_page(page); 626bace090SVlastimil Babka if (pfn > high_pfn) 636bace090SVlastimil Babka high_pfn = pfn; 64748446bbSMel Gorman } 65748446bbSMel Gorman 666bace090SVlastimil Babka return high_pfn; 67748446bbSMel Gorman } 68748446bbSMel Gorman 69ff9543fdSMichal Nazarewicz static void map_pages(struct list_head *list) 70ff9543fdSMichal Nazarewicz { 71ff9543fdSMichal Nazarewicz struct page *page; 72ff9543fdSMichal Nazarewicz 73ff9543fdSMichal Nazarewicz list_for_each_entry(page, list, lru) { 74ff9543fdSMichal Nazarewicz arch_alloc_page(page, 0); 75ff9543fdSMichal Nazarewicz kernel_map_pages(page, 1, 1); 76b8c73fc2SAndrey Ryabinin kasan_alloc_pages(page, 0); 77ff9543fdSMichal Nazarewicz } 
78ff9543fdSMichal Nazarewicz }
79ff9543fdSMichal Nazarewicz 
8047118af0SMichal Nazarewicz static inline bool migrate_async_suitable(int migratetype)
8147118af0SMichal Nazarewicz {
8247118af0SMichal Nazarewicz 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
8347118af0SMichal Nazarewicz }
8447118af0SMichal Nazarewicz 
857d49d886SVlastimil Babka /*
867d49d886SVlastimil Babka  * Check that the whole (or subset of) a pageblock given by the interval of
877d49d886SVlastimil Babka  * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
887d49d886SVlastimil Babka  * with the migrate or free compaction scanner. The scanners then need only
897d49d886SVlastimil Babka  * the pfn_valid_within() check for arches that allow holes within
907d49d886SVlastimil Babka  * pageblocks.
917d49d886SVlastimil Babka  *
927d49d886SVlastimil Babka  * Return the struct page pointer of start_pfn, or NULL if the checks fail.
937d49d886SVlastimil Babka  *
947d49d886SVlastimil Babka  * It's possible on some configurations to have a setup like node0 node1 node0
957d49d886SVlastimil Babka  * i.e. it's possible that all pages within a zone's range of pages do not
967d49d886SVlastimil Babka  * belong to a single zone. We assume that a border between node0 and node1
977d49d886SVlastimil Babka  * can occur within a single pageblock, but not a node0 node1 node0
987d49d886SVlastimil Babka  * interleaving within a single pageblock. It is therefore sufficient to check
997d49d886SVlastimil Babka  * the first and last page of a pageblock and avoid checking each individual
1007d49d886SVlastimil Babka  * page in a pageblock.
1017d49d886SVlastimil Babka  */
1027d49d886SVlastimil Babka static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
1037d49d886SVlastimil Babka 				unsigned long end_pfn, struct zone *zone)
1047d49d886SVlastimil Babka {
1057d49d886SVlastimil Babka 	struct page *start_page;
1067d49d886SVlastimil Babka 	struct page *end_page;
1077d49d886SVlastimil Babka 
1087d49d886SVlastimil Babka 	/* end_pfn is one past the range we are checking */
1097d49d886SVlastimil Babka 	end_pfn--;
1107d49d886SVlastimil Babka 
1117d49d886SVlastimil Babka 	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
1127d49d886SVlastimil Babka 		return NULL;
1137d49d886SVlastimil Babka 
1147d49d886SVlastimil Babka 	start_page = pfn_to_page(start_pfn);
1157d49d886SVlastimil Babka 
1167d49d886SVlastimil Babka 	if (page_zone(start_page) != zone)
1177d49d886SVlastimil Babka 		return NULL;
1187d49d886SVlastimil Babka 
1197d49d886SVlastimil Babka 	end_page = pfn_to_page(end_pfn);
1207d49d886SVlastimil Babka 
1217d49d886SVlastimil Babka 	/* This gives shorter code than deriving page_zone(end_page) */
1227d49d886SVlastimil Babka 	if (page_zone_id(start_page) != page_zone_id(end_page))
1237d49d886SVlastimil Babka 		return NULL;
1247d49d886SVlastimil Babka 
1257d49d886SVlastimil Babka 	return start_page;
1267d49d886SVlastimil Babka }
1277d49d886SVlastimil Babka 
128bb13ffebSMel Gorman #ifdef CONFIG_COMPACTION
12924e2716fSJoonsoo Kim 
13024e2716fSJoonsoo Kim /* Do not skip compaction more than 64 times */
13124e2716fSJoonsoo Kim #define COMPACT_MAX_DEFER_SHIFT 6
13224e2716fSJoonsoo Kim 
13324e2716fSJoonsoo Kim /*
13424e2716fSJoonsoo Kim  * Compaction is deferred when compaction fails to result in a page
13524e2716fSJoonsoo Kim  * allocation success.
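 * For example, with compact_defer_shift == 2 the next 1 << 2 == 4
 * attempts for this order are skipped; each further failure doubles
 * that window, up to 1 << COMPACT_MAX_DEFER_SHIFT == 64 skips.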
 * 1 << compact_defer_shift compactions are skipped up
13624e2716fSJoonsoo Kim  * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
13724e2716fSJoonsoo Kim  */
13824e2716fSJoonsoo Kim void defer_compaction(struct zone *zone, int order)
13924e2716fSJoonsoo Kim {
14024e2716fSJoonsoo Kim 	zone->compact_considered = 0;
14124e2716fSJoonsoo Kim 	zone->compact_defer_shift++;
14224e2716fSJoonsoo Kim 
14324e2716fSJoonsoo Kim 	if (order < zone->compact_order_failed)
14424e2716fSJoonsoo Kim 		zone->compact_order_failed = order;
14524e2716fSJoonsoo Kim 
14624e2716fSJoonsoo Kim 	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
14724e2716fSJoonsoo Kim 		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
14824e2716fSJoonsoo Kim 
14924e2716fSJoonsoo Kim 	trace_mm_compaction_defer_compaction(zone, order);
15024e2716fSJoonsoo Kim }
15124e2716fSJoonsoo Kim 
15224e2716fSJoonsoo Kim /* Returns true if compaction should be skipped this time */
15324e2716fSJoonsoo Kim bool compaction_deferred(struct zone *zone, int order)
15424e2716fSJoonsoo Kim {
15524e2716fSJoonsoo Kim 	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
15624e2716fSJoonsoo Kim 
15724e2716fSJoonsoo Kim 	if (order < zone->compact_order_failed)
15824e2716fSJoonsoo Kim 		return false;
15924e2716fSJoonsoo Kim 
16024e2716fSJoonsoo Kim 	/* Avoid possible overflow */
16124e2716fSJoonsoo Kim 	if (++zone->compact_considered > defer_limit)
16224e2716fSJoonsoo Kim 		zone->compact_considered = defer_limit;
16324e2716fSJoonsoo Kim 
16424e2716fSJoonsoo Kim 	if (zone->compact_considered >= defer_limit)
16524e2716fSJoonsoo Kim 		return false;
16624e2716fSJoonsoo Kim 
16724e2716fSJoonsoo Kim 	trace_mm_compaction_deferred(zone, order);
16824e2716fSJoonsoo Kim 
16924e2716fSJoonsoo Kim 	return true;
17024e2716fSJoonsoo Kim }
17124e2716fSJoonsoo Kim 
17224e2716fSJoonsoo Kim /*
17324e2716fSJoonsoo Kim  * Update defer tracking counters after successful compaction of given order,
17424e2716fSJoonsoo Kim  * which means an allocation either succeeded (alloc_success == true) or is
17524e2716fSJoonsoo Kim  * expected to succeed.
17624e2716fSJoonsoo Kim  */
17724e2716fSJoonsoo Kim void compaction_defer_reset(struct zone *zone, int order,
17824e2716fSJoonsoo Kim 		bool alloc_success)
17924e2716fSJoonsoo Kim {
18024e2716fSJoonsoo Kim 	if (alloc_success) {
18124e2716fSJoonsoo Kim 		zone->compact_considered = 0;
18224e2716fSJoonsoo Kim 		zone->compact_defer_shift = 0;
18324e2716fSJoonsoo Kim 	}
18424e2716fSJoonsoo Kim 	if (order >= zone->compact_order_failed)
18524e2716fSJoonsoo Kim 		zone->compact_order_failed = order + 1;
18624e2716fSJoonsoo Kim 
18724e2716fSJoonsoo Kim 	trace_mm_compaction_defer_reset(zone, order);
18824e2716fSJoonsoo Kim }
18924e2716fSJoonsoo Kim 
19024e2716fSJoonsoo Kim /* Returns true if restarting compaction after many failures */
19124e2716fSJoonsoo Kim bool compaction_restarting(struct zone *zone, int order)
19224e2716fSJoonsoo Kim {
19324e2716fSJoonsoo Kim 	if (order < zone->compact_order_failed)
19424e2716fSJoonsoo Kim 		return false;
19524e2716fSJoonsoo Kim 
19624e2716fSJoonsoo Kim 	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
19724e2716fSJoonsoo Kim 		zone->compact_considered >= 1UL << zone->compact_defer_shift;
19824e2716fSJoonsoo Kim }
19924e2716fSJoonsoo Kim 
200bb13ffebSMel Gorman /* Returns true if the pageblock should be scanned for pages to isolate.
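 * The skip hint can be overridden: with cc->ignore_skip_hint set, every
 * pageblock is considered suitable regardless of earlier failures.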
*/ 201bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 202bb13ffebSMel Gorman struct page *page) 203bb13ffebSMel Gorman { 204bb13ffebSMel Gorman if (cc->ignore_skip_hint) 205bb13ffebSMel Gorman return true; 206bb13ffebSMel Gorman 207bb13ffebSMel Gorman return !get_pageblock_skip(page); 208bb13ffebSMel Gorman } 209bb13ffebSMel Gorman 21002333641SVlastimil Babka static void reset_cached_positions(struct zone *zone) 21102333641SVlastimil Babka { 21202333641SVlastimil Babka zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 21302333641SVlastimil Babka zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 21402333641SVlastimil Babka zone->compact_cached_free_pfn = zone_end_pfn(zone); 21502333641SVlastimil Babka } 21602333641SVlastimil Babka 217bb13ffebSMel Gorman /* 218bb13ffebSMel Gorman * This function is called to clear all cached information on pageblocks that 219bb13ffebSMel Gorman * should be skipped for page isolation when the migrate and free page scanner 220bb13ffebSMel Gorman * meet. 221bb13ffebSMel Gorman */ 22262997027SMel Gorman static void __reset_isolation_suitable(struct zone *zone) 223bb13ffebSMel Gorman { 224bb13ffebSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 225108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 226bb13ffebSMel Gorman unsigned long pfn; 227bb13ffebSMel Gorman 22862997027SMel Gorman zone->compact_blockskip_flush = false; 229bb13ffebSMel Gorman 230bb13ffebSMel Gorman /* Walk the zone and mark every pageblock as suitable for isolation */ 231bb13ffebSMel Gorman for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { 232bb13ffebSMel Gorman struct page *page; 233bb13ffebSMel Gorman 234bb13ffebSMel Gorman cond_resched(); 235bb13ffebSMel Gorman 236bb13ffebSMel Gorman if (!pfn_valid(pfn)) 237bb13ffebSMel Gorman continue; 238bb13ffebSMel Gorman 239bb13ffebSMel Gorman page = pfn_to_page(pfn); 240bb13ffebSMel Gorman if (zone != page_zone(page)) 241bb13ffebSMel Gorman continue; 242bb13ffebSMel Gorman 243bb13ffebSMel Gorman clear_pageblock_skip(page); 244bb13ffebSMel Gorman } 24502333641SVlastimil Babka 24602333641SVlastimil Babka reset_cached_positions(zone); 247bb13ffebSMel Gorman } 248bb13ffebSMel Gorman 24962997027SMel Gorman void reset_isolation_suitable(pg_data_t *pgdat) 25062997027SMel Gorman { 25162997027SMel Gorman int zoneid; 25262997027SMel Gorman 25362997027SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 25462997027SMel Gorman struct zone *zone = &pgdat->node_zones[zoneid]; 25562997027SMel Gorman if (!populated_zone(zone)) 25662997027SMel Gorman continue; 25762997027SMel Gorman 25862997027SMel Gorman /* Only flush if a full compaction finished recently */ 25962997027SMel Gorman if (zone->compact_blockskip_flush) 26062997027SMel Gorman __reset_isolation_suitable(zone); 26162997027SMel Gorman } 26262997027SMel Gorman } 26362997027SMel Gorman 264bb13ffebSMel Gorman /* 265bb13ffebSMel Gorman * If no pages were isolated then mark this pageblock to be skipped in the 26662997027SMel Gorman * future. The information is later cleared by __reset_isolation_suitable(). 
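 * Note that the cached scanner positions updated below only move in the
 * direction each scanner travels: the migrate scanner's cached pfn is
 * only ever raised, the free scanner's only ever lowered.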
267bb13ffebSMel Gorman */ 268c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 269c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 270edc2ca61SVlastimil Babka bool migrate_scanner) 271bb13ffebSMel Gorman { 272c89511abSMel Gorman struct zone *zone = cc->zone; 27335979ef3SDavid Rientjes unsigned long pfn; 2746815bf3fSJoonsoo Kim 2756815bf3fSJoonsoo Kim if (cc->ignore_skip_hint) 2766815bf3fSJoonsoo Kim return; 2776815bf3fSJoonsoo Kim 278bb13ffebSMel Gorman if (!page) 279bb13ffebSMel Gorman return; 280bb13ffebSMel Gorman 28135979ef3SDavid Rientjes if (nr_isolated) 28235979ef3SDavid Rientjes return; 28335979ef3SDavid Rientjes 284bb13ffebSMel Gorman set_pageblock_skip(page); 285c89511abSMel Gorman 28635979ef3SDavid Rientjes pfn = page_to_pfn(page); 28735979ef3SDavid Rientjes 28835979ef3SDavid Rientjes /* Update where async and sync compaction should restart */ 289c89511abSMel Gorman if (migrate_scanner) { 29035979ef3SDavid Rientjes if (pfn > zone->compact_cached_migrate_pfn[0]) 29135979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = pfn; 292e0b9daebSDavid Rientjes if (cc->mode != MIGRATE_ASYNC && 293e0b9daebSDavid Rientjes pfn > zone->compact_cached_migrate_pfn[1]) 29435979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = pfn; 295c89511abSMel Gorman } else { 29635979ef3SDavid Rientjes if (pfn < zone->compact_cached_free_pfn) 297c89511abSMel Gorman zone->compact_cached_free_pfn = pfn; 298c89511abSMel Gorman } 299c89511abSMel Gorman } 300bb13ffebSMel Gorman #else 301bb13ffebSMel Gorman static inline bool isolation_suitable(struct compact_control *cc, 302bb13ffebSMel Gorman struct page *page) 303bb13ffebSMel Gorman { 304bb13ffebSMel Gorman return true; 305bb13ffebSMel Gorman } 306bb13ffebSMel Gorman 307c89511abSMel Gorman static void update_pageblock_skip(struct compact_control *cc, 308c89511abSMel Gorman struct page *page, unsigned long nr_isolated, 309edc2ca61SVlastimil Babka bool migrate_scanner) 310bb13ffebSMel Gorman { 311bb13ffebSMel Gorman } 312bb13ffebSMel Gorman #endif /* CONFIG_COMPACTION */ 313bb13ffebSMel Gorman 3141f9efdefSVlastimil Babka /* 3158b44d279SVlastimil Babka * Compaction requires the taking of some coarse locks that are potentially 3168b44d279SVlastimil Babka * very heavily contended. For async compaction, back out if the lock cannot 3178b44d279SVlastimil Babka * be taken immediately. For sync compaction, spin on the lock if needed. 
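 *
 * A sketch of the intended usage, as in isolate_freepages_block() below:
 *
 *	locked = compact_trylock_irqsave(&cc->zone->lock, &flags, cc);
 *	if (!locked)
 *		break;	- async compaction backed out, cc->contended set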
3188b44d279SVlastimil Babka * 3198b44d279SVlastimil Babka * Returns true if the lock is held 3208b44d279SVlastimil Babka * Returns false if the lock is not held and compaction should abort 3211f9efdefSVlastimil Babka */ 3228b44d279SVlastimil Babka static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags, 3238b44d279SVlastimil Babka struct compact_control *cc) 3248b44d279SVlastimil Babka { 3258b44d279SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 3268b44d279SVlastimil Babka if (!spin_trylock_irqsave(lock, *flags)) { 3278b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_LOCK; 3288b44d279SVlastimil Babka return false; 3298b44d279SVlastimil Babka } 3308b44d279SVlastimil Babka } else { 3318b44d279SVlastimil Babka spin_lock_irqsave(lock, *flags); 3328b44d279SVlastimil Babka } 3331f9efdefSVlastimil Babka 3348b44d279SVlastimil Babka return true; 3352a1402aaSMel Gorman } 3362a1402aaSMel Gorman 33785aa125fSMichal Nazarewicz /* 338c67fe375SMel Gorman * Compaction requires the taking of some coarse locks that are potentially 3398b44d279SVlastimil Babka * very heavily contended. The lock should be periodically unlocked to avoid 3408b44d279SVlastimil Babka * having disabled IRQs for a long time, even when there is nobody waiting on 3418b44d279SVlastimil Babka * the lock. It might also be that allowing the IRQs will result in 3428b44d279SVlastimil Babka * need_resched() becoming true. If scheduling is needed, async compaction 3438b44d279SVlastimil Babka * aborts. Sync compaction schedules. 3448b44d279SVlastimil Babka * Either compaction type will also abort if a fatal signal is pending. 3458b44d279SVlastimil Babka * In either case if the lock was locked, it is dropped and not regained. 346c67fe375SMel Gorman * 3478b44d279SVlastimil Babka * Returns true if compaction should abort due to fatal signal pending, or 3488b44d279SVlastimil Babka * async compaction due to need_resched() 3498b44d279SVlastimil Babka * Returns false when compaction can continue (sync compaction might have 3508b44d279SVlastimil Babka * scheduled) 351c67fe375SMel Gorman */ 3528b44d279SVlastimil Babka static bool compact_unlock_should_abort(spinlock_t *lock, 3538b44d279SVlastimil Babka unsigned long flags, bool *locked, struct compact_control *cc) 354c67fe375SMel Gorman { 3558b44d279SVlastimil Babka if (*locked) { 3568b44d279SVlastimil Babka spin_unlock_irqrestore(lock, flags); 3578b44d279SVlastimil Babka *locked = false; 358c67fe375SMel Gorman } 359c67fe375SMel Gorman 3608b44d279SVlastimil Babka if (fatal_signal_pending(current)) { 3618b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 3628b44d279SVlastimil Babka return true; 3638b44d279SVlastimil Babka } 3648b44d279SVlastimil Babka 3658b44d279SVlastimil Babka if (need_resched()) { 366e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) { 3678b44d279SVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 3688b44d279SVlastimil Babka return true; 369c67fe375SMel Gorman } 370c67fe375SMel Gorman cond_resched(); 371c67fe375SMel Gorman } 372c67fe375SMel Gorman 3738b44d279SVlastimil Babka return false; 374c67fe375SMel Gorman } 375c67fe375SMel Gorman 376be976572SVlastimil Babka /* 377be976572SVlastimil Babka * Aside from avoiding lock contention, compaction also periodically checks 378be976572SVlastimil Babka * need_resched() and either schedules in sync compaction or aborts async 3798b44d279SVlastimil Babka * compaction. 
This is similar to what compact_unlock_should_abort() does, but 380be976572SVlastimil Babka * is used where no lock is concerned. 381be976572SVlastimil Babka * 382be976572SVlastimil Babka * Returns false when no scheduling was needed, or sync compaction scheduled. 383be976572SVlastimil Babka * Returns true when async compaction should abort. 384be976572SVlastimil Babka */ 385be976572SVlastimil Babka static inline bool compact_should_abort(struct compact_control *cc) 386be976572SVlastimil Babka { 387be976572SVlastimil Babka /* async compaction aborts if contended */ 388be976572SVlastimil Babka if (need_resched()) { 389be976572SVlastimil Babka if (cc->mode == MIGRATE_ASYNC) { 3901f9efdefSVlastimil Babka cc->contended = COMPACT_CONTENDED_SCHED; 391be976572SVlastimil Babka return true; 392be976572SVlastimil Babka } 393be976572SVlastimil Babka 394be976572SVlastimil Babka cond_resched(); 395be976572SVlastimil Babka } 396be976572SVlastimil Babka 397be976572SVlastimil Babka return false; 398be976572SVlastimil Babka } 399be976572SVlastimil Babka 400c67fe375SMel Gorman /* 4019e4be470SJerome Marchand * Isolate free pages onto a private freelist. If @strict is true, will abort 4029e4be470SJerome Marchand * returning 0 on any invalid PFNs or non-free pages inside of the pageblock 4039e4be470SJerome Marchand * (even though it may still end up isolating some pages). 40485aa125fSMichal Nazarewicz */ 405f40d1e42SMel Gorman static unsigned long isolate_freepages_block(struct compact_control *cc, 406e14c720eSVlastimil Babka unsigned long *start_pfn, 40785aa125fSMichal Nazarewicz unsigned long end_pfn, 40885aa125fSMichal Nazarewicz struct list_head *freelist, 40985aa125fSMichal Nazarewicz bool strict) 410748446bbSMel Gorman { 411b7aba698SMel Gorman int nr_scanned = 0, total_isolated = 0; 412bb13ffebSMel Gorman struct page *cursor, *valid_page = NULL; 413b8b2d825SXiubo Li unsigned long flags = 0; 414f40d1e42SMel Gorman bool locked = false; 415e14c720eSVlastimil Babka unsigned long blockpfn = *start_pfn; 416748446bbSMel Gorman 417748446bbSMel Gorman cursor = pfn_to_page(blockpfn); 418748446bbSMel Gorman 419f40d1e42SMel Gorman /* Isolate free pages. */ 420748446bbSMel Gorman for (; blockpfn < end_pfn; blockpfn++, cursor++) { 421748446bbSMel Gorman int isolated, i; 422748446bbSMel Gorman struct page *page = cursor; 423748446bbSMel Gorman 4248b44d279SVlastimil Babka /* 4258b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 4268b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort if fatal signal 4278b44d279SVlastimil Babka * pending or async compaction detects need_resched() 4288b44d279SVlastimil Babka */ 4298b44d279SVlastimil Babka if (!(blockpfn % SWAP_CLUSTER_MAX) 4308b44d279SVlastimil Babka && compact_unlock_should_abort(&cc->zone->lock, flags, 4318b44d279SVlastimil Babka &locked, cc)) 4328b44d279SVlastimil Babka break; 4338b44d279SVlastimil Babka 434b7aba698SMel Gorman nr_scanned++; 435f40d1e42SMel Gorman if (!pfn_valid_within(blockpfn)) 4362af120bcSLaura Abbott goto isolate_fail; 4372af120bcSLaura Abbott 438bb13ffebSMel Gorman if (!valid_page) 439bb13ffebSMel Gorman valid_page = page; 440f40d1e42SMel Gorman if (!PageBuddy(page)) 4412af120bcSLaura Abbott goto isolate_fail; 442f40d1e42SMel Gorman 443f40d1e42SMel Gorman /* 44469b7189fSVlastimil Babka * If we already hold the lock, we can skip some rechecking. 
44569b7189fSVlastimil Babka * Note that if we hold the lock now, checked_pageblock was 44669b7189fSVlastimil Babka * already set in some previous iteration (or strict is true), 44769b7189fSVlastimil Babka * so it is correct to skip the suitable migration target 44869b7189fSVlastimil Babka * recheck as well. 44969b7189fSVlastimil Babka */ 45069b7189fSVlastimil Babka if (!locked) { 45169b7189fSVlastimil Babka /* 452f40d1e42SMel Gorman * The zone lock must be held to isolate freepages. 453f40d1e42SMel Gorman * Unfortunately this is a very coarse lock and can be 454f40d1e42SMel Gorman * heavily contended if there are parallel allocations 455f40d1e42SMel Gorman * or parallel compactions. For async compaction do not 456f40d1e42SMel Gorman * spin on the lock and we acquire the lock as late as 457f40d1e42SMel Gorman * possible. 458f40d1e42SMel Gorman */ 4598b44d279SVlastimil Babka locked = compact_trylock_irqsave(&cc->zone->lock, 4608b44d279SVlastimil Babka &flags, cc); 461f40d1e42SMel Gorman if (!locked) 462f40d1e42SMel Gorman break; 463f40d1e42SMel Gorman 464f40d1e42SMel Gorman /* Recheck this is a buddy page under lock */ 465f40d1e42SMel Gorman if (!PageBuddy(page)) 4662af120bcSLaura Abbott goto isolate_fail; 46769b7189fSVlastimil Babka } 468748446bbSMel Gorman 469748446bbSMel Gorman /* Found a free page, break it into order-0 pages */ 470748446bbSMel Gorman isolated = split_free_page(page); 471748446bbSMel Gorman total_isolated += isolated; 472748446bbSMel Gorman for (i = 0; i < isolated; i++) { 473748446bbSMel Gorman list_add(&page->lru, freelist); 474748446bbSMel Gorman page++; 475748446bbSMel Gorman } 476748446bbSMel Gorman 477748446bbSMel Gorman /* If a page was split, advance to the end of it */ 478748446bbSMel Gorman if (isolated) { 479932ff6bbSJoonsoo Kim cc->nr_freepages += isolated; 480932ff6bbSJoonsoo Kim if (!strict && 481932ff6bbSJoonsoo Kim cc->nr_migratepages <= cc->nr_freepages) { 482932ff6bbSJoonsoo Kim blockpfn += isolated; 483932ff6bbSJoonsoo Kim break; 484932ff6bbSJoonsoo Kim } 485932ff6bbSJoonsoo Kim 486748446bbSMel Gorman blockpfn += isolated - 1; 487748446bbSMel Gorman cursor += isolated - 1; 4882af120bcSLaura Abbott continue; 489748446bbSMel Gorman } 4902af120bcSLaura Abbott 4912af120bcSLaura Abbott isolate_fail: 4922af120bcSLaura Abbott if (strict) 4932af120bcSLaura Abbott break; 4942af120bcSLaura Abbott else 4952af120bcSLaura Abbott continue; 4962af120bcSLaura Abbott 497748446bbSMel Gorman } 498748446bbSMel Gorman 499e34d85f0SJoonsoo Kim trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn, 500e34d85f0SJoonsoo Kim nr_scanned, total_isolated); 501e34d85f0SJoonsoo Kim 502e14c720eSVlastimil Babka /* Record how far we have got within the block */ 503e14c720eSVlastimil Babka *start_pfn = blockpfn; 504e14c720eSVlastimil Babka 505f40d1e42SMel Gorman /* 506f40d1e42SMel Gorman * If strict isolation is requested by CMA then check that all the 507f40d1e42SMel Gorman * pages requested were isolated. If there were any failures, 0 is 508f40d1e42SMel Gorman * returned and CMA will fail. 
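	 * The only strict caller in this file is isolate_freepages_range(),
	 * which services CMA-style requests that need every page in the
	 * requested range to be isolated.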
509f40d1e42SMel Gorman 	 */
5102af120bcSLaura Abbott 	if (strict && blockpfn < end_pfn)
511f40d1e42SMel Gorman 		total_isolated = 0;
512f40d1e42SMel Gorman 
513f40d1e42SMel Gorman 	if (locked)
514f40d1e42SMel Gorman 		spin_unlock_irqrestore(&cc->zone->lock, flags);
515f40d1e42SMel Gorman 
516bb13ffebSMel Gorman 	/* Update the pageblock-skip if the whole pageblock was scanned */
517bb13ffebSMel Gorman 	if (blockpfn == end_pfn)
518edc2ca61SVlastimil Babka 		update_pageblock_skip(cc, valid_page, total_isolated, false);
519bb13ffebSMel Gorman 
520010fc29aSMinchan Kim 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
521397487dbSMel Gorman 	if (total_isolated)
522010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, total_isolated);
523748446bbSMel Gorman 	return total_isolated;
524748446bbSMel Gorman }
525748446bbSMel Gorman 
52685aa125fSMichal Nazarewicz /**
52785aa125fSMichal Nazarewicz  * isolate_freepages_range() - isolate free pages.
52885aa125fSMichal Nazarewicz  * @start_pfn: The first PFN to start isolating.
52985aa125fSMichal Nazarewicz  * @end_pfn: The one-past-last PFN.
53085aa125fSMichal Nazarewicz  *
53185aa125fSMichal Nazarewicz  * Non-free pages, invalid PFNs, or zone boundaries within the
53285aa125fSMichal Nazarewicz  * [start_pfn, end_pfn) range are considered errors and cause the function
53385aa125fSMichal Nazarewicz  * to undo its actions and return zero.
53485aa125fSMichal Nazarewicz  *
53585aa125fSMichal Nazarewicz  * Otherwise, the function returns the one-past-the-last PFN of the isolated
53685aa125fSMichal Nazarewicz  * pages (which may be greater than end_pfn if the end fell in the middle of
53785aa125fSMichal Nazarewicz  * a free page).
53885aa125fSMichal Nazarewicz  */
539ff9543fdSMichal Nazarewicz unsigned long
540bb13ffebSMel Gorman isolate_freepages_range(struct compact_control *cc,
541bb13ffebSMel Gorman 			unsigned long start_pfn, unsigned long end_pfn)
54285aa125fSMichal Nazarewicz {
543f40d1e42SMel Gorman 	unsigned long isolated, pfn, block_end_pfn;
54485aa125fSMichal Nazarewicz 	LIST_HEAD(freelist);
54585aa125fSMichal Nazarewicz 
5467d49d886SVlastimil Babka 	pfn = start_pfn;
54785aa125fSMichal Nazarewicz 	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
5487d49d886SVlastimil Babka 
5497d49d886SVlastimil Babka 	for (; pfn < end_pfn; pfn += isolated,
5507d49d886SVlastimil Babka 				block_end_pfn += pageblock_nr_pages) {
551e14c720eSVlastimil Babka 		/* Protect pfn from being changed by isolate_freepages_block */
552e14c720eSVlastimil Babka 		unsigned long isolate_start_pfn = pfn;
5537d49d886SVlastimil Babka 
55485aa125fSMichal Nazarewicz 		block_end_pfn = min(block_end_pfn, end_pfn);
55585aa125fSMichal Nazarewicz 
55658420016SJoonsoo Kim 		/*
55758420016SJoonsoo Kim 		 * pfn could pass block_end_pfn if the isolated free page is
55858420016SJoonsoo Kim 		 * larger than pageblock order. In this case, adjust the
55958420016SJoonsoo Kim 		 * scanning range to the right one.
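		 * For example, with 4KB pages and pageblock order 9, an
		 * isolated order-10 buddy page at pfn 0 advances pfn to
		 * 1024 while the loop has advanced block_end_pfn to 1024
		 * as well; the check below realigns block_end_pfn to 1536,
		 * the end of the pageblock that now contains pfn.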
56058420016SJoonsoo Kim */ 56158420016SJoonsoo Kim if (pfn >= block_end_pfn) { 56258420016SJoonsoo Kim block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages); 56358420016SJoonsoo Kim block_end_pfn = min(block_end_pfn, end_pfn); 56458420016SJoonsoo Kim } 56558420016SJoonsoo Kim 5667d49d886SVlastimil Babka if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 5677d49d886SVlastimil Babka break; 5687d49d886SVlastimil Babka 569e14c720eSVlastimil Babka isolated = isolate_freepages_block(cc, &isolate_start_pfn, 570e14c720eSVlastimil Babka block_end_pfn, &freelist, true); 57185aa125fSMichal Nazarewicz 57285aa125fSMichal Nazarewicz /* 57385aa125fSMichal Nazarewicz * In strict mode, isolate_freepages_block() returns 0 if 57485aa125fSMichal Nazarewicz * there are any holes in the block (ie. invalid PFNs or 57585aa125fSMichal Nazarewicz * non-free pages). 57685aa125fSMichal Nazarewicz */ 57785aa125fSMichal Nazarewicz if (!isolated) 57885aa125fSMichal Nazarewicz break; 57985aa125fSMichal Nazarewicz 58085aa125fSMichal Nazarewicz /* 58185aa125fSMichal Nazarewicz * If we managed to isolate pages, it is always (1 << n) * 58285aa125fSMichal Nazarewicz * pageblock_nr_pages for some non-negative n. (Max order 58385aa125fSMichal Nazarewicz * page may span two pageblocks). 58485aa125fSMichal Nazarewicz */ 58585aa125fSMichal Nazarewicz } 58685aa125fSMichal Nazarewicz 58785aa125fSMichal Nazarewicz /* split_free_page does not map the pages */ 58885aa125fSMichal Nazarewicz map_pages(&freelist); 58985aa125fSMichal Nazarewicz 59085aa125fSMichal Nazarewicz if (pfn < end_pfn) { 59185aa125fSMichal Nazarewicz /* Loop terminated early, cleanup. */ 59285aa125fSMichal Nazarewicz release_freepages(&freelist); 59385aa125fSMichal Nazarewicz return 0; 59485aa125fSMichal Nazarewicz } 59585aa125fSMichal Nazarewicz 59685aa125fSMichal Nazarewicz /* We don't use freelists for anything. 
 */
59785aa125fSMichal Nazarewicz 	return pfn;
59885aa125fSMichal Nazarewicz }
59985aa125fSMichal Nazarewicz 
600748446bbSMel Gorman /* Update the number of anon and file isolated pages in the zone */
601edc2ca61SVlastimil Babka static void acct_isolated(struct zone *zone, struct compact_control *cc)
602748446bbSMel Gorman {
603748446bbSMel Gorman 	struct page *page;
604b9e84ac1SMinchan Kim 	unsigned int count[2] = { 0, };
605748446bbSMel Gorman 
606edc2ca61SVlastimil Babka 	if (list_empty(&cc->migratepages))
607edc2ca61SVlastimil Babka 		return;
608edc2ca61SVlastimil Babka 
609b9e84ac1SMinchan Kim 	list_for_each_entry(page, &cc->migratepages, lru)
610b9e84ac1SMinchan Kim 		count[!!page_is_file_cache(page)]++;
611748446bbSMel Gorman 
612c67fe375SMel Gorman 	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
613c67fe375SMel Gorman 	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
614c67fe375SMel Gorman }
615748446bbSMel Gorman 
616748446bbSMel Gorman /* Similar to reclaim, but different enough that they don't share logic */
617748446bbSMel Gorman static bool too_many_isolated(struct zone *zone)
618748446bbSMel Gorman {
619bc693045SMinchan Kim 	unsigned long active, inactive, isolated;
620748446bbSMel Gorman 
621748446bbSMel Gorman 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
622748446bbSMel Gorman 					zone_page_state(zone, NR_INACTIVE_ANON);
623bc693045SMinchan Kim 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
624bc693045SMinchan Kim 					zone_page_state(zone, NR_ACTIVE_ANON);
625748446bbSMel Gorman 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
626748446bbSMel Gorman 					zone_page_state(zone, NR_ISOLATED_ANON);
627748446bbSMel Gorman 
628bc693045SMinchan Kim 	return isolated > (inactive + active) / 2;
629748446bbSMel Gorman }
630748446bbSMel Gorman 
6312fe86e00SMichal Nazarewicz /**
632edc2ca61SVlastimil Babka  * isolate_migratepages_block() - isolate all migrate-able pages within
633edc2ca61SVlastimil Babka  *				  a single pageblock
6342fe86e00SMichal Nazarewicz  * @cc:		Compaction control structure.
635edc2ca61SVlastimil Babka  * @low_pfn:	The first PFN to isolate
636edc2ca61SVlastimil Babka  * @end_pfn:	The one-past-the-last PFN to isolate, within the same pageblock
637edc2ca61SVlastimil Babka  * @isolate_mode: Isolation mode to be used.
6382fe86e00SMichal Nazarewicz  *
6392fe86e00SMichal Nazarewicz  * Isolate all pages that can be migrated from the range specified by
640edc2ca61SVlastimil Babka  * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
641edc2ca61SVlastimil Babka  * Returns zero if there is a fatal signal pending, otherwise the PFN of the
642edc2ca61SVlastimil Babka  * first page that was not scanned (which may be less than, equal to, or
643edc2ca61SVlastimil Babka  * greater than end_pfn).
6442fe86e00SMichal Nazarewicz  *
645edc2ca61SVlastimil Babka  * The pages are isolated on the cc->migratepages list (not required to be
646edc2ca61SVlastimil Babka  * empty), and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn
647edc2ca61SVlastimil Babka  * field is neither read nor updated.
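 * Both callers in this file rely on this: isolate_migratepages() feeds
 * one pageblock at a time during compaction, while
 * isolate_migratepages_range() covers CMA-style PFN ranges.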
648748446bbSMel Gorman */ 649edc2ca61SVlastimil Babka static unsigned long 650edc2ca61SVlastimil Babka isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, 651edc2ca61SVlastimil Babka unsigned long end_pfn, isolate_mode_t isolate_mode) 652748446bbSMel Gorman { 653edc2ca61SVlastimil Babka struct zone *zone = cc->zone; 654b7aba698SMel Gorman unsigned long nr_scanned = 0, nr_isolated = 0; 655748446bbSMel Gorman struct list_head *migratelist = &cc->migratepages; 656fa9add64SHugh Dickins struct lruvec *lruvec; 657b8b2d825SXiubo Li unsigned long flags = 0; 6582a1402aaSMel Gorman bool locked = false; 659bb13ffebSMel Gorman struct page *page = NULL, *valid_page = NULL; 660e34d85f0SJoonsoo Kim unsigned long start_pfn = low_pfn; 661748446bbSMel Gorman 662748446bbSMel Gorman /* 663748446bbSMel Gorman * Ensure that there are not too many pages isolated from the LRU 664748446bbSMel Gorman * list by either parallel reclaimers or compaction. If there are, 665748446bbSMel Gorman * delay for some time until fewer pages are isolated 666748446bbSMel Gorman */ 667748446bbSMel Gorman while (unlikely(too_many_isolated(zone))) { 668f9e35b3bSMel Gorman /* async migration should just abort */ 669e0b9daebSDavid Rientjes if (cc->mode == MIGRATE_ASYNC) 6702fe86e00SMichal Nazarewicz return 0; 671f9e35b3bSMel Gorman 672748446bbSMel Gorman congestion_wait(BLK_RW_ASYNC, HZ/10); 673748446bbSMel Gorman 674748446bbSMel Gorman if (fatal_signal_pending(current)) 6752fe86e00SMichal Nazarewicz return 0; 676748446bbSMel Gorman } 677748446bbSMel Gorman 678be976572SVlastimil Babka if (compact_should_abort(cc)) 679aeef4b83SDavid Rientjes return 0; 680aeef4b83SDavid Rientjes 681748446bbSMel Gorman /* Time to isolate some pages for migration */ 682748446bbSMel Gorman for (; low_pfn < end_pfn; low_pfn++) { 683*29c0dde8SVlastimil Babka bool is_lru; 684*29c0dde8SVlastimil Babka 6858b44d279SVlastimil Babka /* 6868b44d279SVlastimil Babka * Periodically drop the lock (if held) regardless of its 6878b44d279SVlastimil Babka * contention, to give chance to IRQs. Abort async compaction 6888b44d279SVlastimil Babka * if contended. 6898b44d279SVlastimil Babka */ 6908b44d279SVlastimil Babka if (!(low_pfn % SWAP_CLUSTER_MAX) 6918b44d279SVlastimil Babka && compact_unlock_should_abort(&zone->lru_lock, flags, 6928b44d279SVlastimil Babka &locked, cc)) 6938b44d279SVlastimil Babka break; 694b2eef8c0SAndrea Arcangeli 695748446bbSMel Gorman if (!pfn_valid_within(low_pfn)) 696748446bbSMel Gorman continue; 697b7aba698SMel Gorman nr_scanned++; 698748446bbSMel Gorman 699748446bbSMel Gorman page = pfn_to_page(low_pfn); 700dc908600SMel Gorman 701bb13ffebSMel Gorman if (!valid_page) 702bb13ffebSMel Gorman valid_page = page; 703bb13ffebSMel Gorman 704c122b208SJoonsoo Kim /* 70599c0fd5eSVlastimil Babka * Skip if free. We read page order here without zone lock 70699c0fd5eSVlastimil Babka * which is generally unsafe, but the race window is small and 70799c0fd5eSVlastimil Babka * the worst thing that can happen is that we skip some 70899c0fd5eSVlastimil Babka * potential isolation targets. 7096c14466cSMel Gorman */ 71099c0fd5eSVlastimil Babka if (PageBuddy(page)) { 71199c0fd5eSVlastimil Babka unsigned long freepage_order = page_order_unsafe(page); 71299c0fd5eSVlastimil Babka 71399c0fd5eSVlastimil Babka /* 71499c0fd5eSVlastimil Babka * Without lock, we cannot be sure that what we got is 71599c0fd5eSVlastimil Babka * a valid page order. Consider only values in the 71699c0fd5eSVlastimil Babka * valid order range to prevent low_pfn overflow. 
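			 * For example, a racing free could leave a stale
			 * value where the order is stored; adding
			 * (1UL << stale_order) - 1 blindly could wrap
			 * low_pfn, so only 0 < order < MAX_ORDER is used.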
71799c0fd5eSVlastimil Babka 			 */
71899c0fd5eSVlastimil Babka 			if (freepage_order > 0 && freepage_order < MAX_ORDER)
71999c0fd5eSVlastimil Babka 				low_pfn += (1UL << freepage_order) - 1;
720748446bbSMel Gorman 			continue;
72199c0fd5eSVlastimil Babka 		}
722748446bbSMel Gorman 
7239927af74SMel Gorman 		/*
724bf6bddf1SRafael Aquini 		 * The check may be lockless but that's OK as we recheck later.
725bf6bddf1SRafael Aquini 		 * It's possible to migrate LRU pages and balloon pages;
726bf6bddf1SRafael Aquini 		 * skip any other type of page.
727bf6bddf1SRafael Aquini 		 */
728*29c0dde8SVlastimil Babka 		is_lru = PageLRU(page);
729*29c0dde8SVlastimil Babka 		if (!is_lru) {
730bf6bddf1SRafael Aquini 			if (unlikely(balloon_page_movable(page))) {
731d6d86c0aSKonstantin Khlebnikov 				if (balloon_page_isolate(page)) {
732bf6bddf1SRafael Aquini 					/* Successfully isolated */
733b6c75016SJoonsoo Kim 					goto isolate_success;
734bf6bddf1SRafael Aquini 				}
735bf6bddf1SRafael Aquini 			}
736bf6bddf1SRafael Aquini 		}
737bc835011SAndrea Arcangeli 
738bc835011SAndrea Arcangeli 		/*
739*29c0dde8SVlastimil Babka 		 * Regardless of being on LRU, compound pages such as THP and
740*29c0dde8SVlastimil Babka 		 * hugetlbfs are not to be compacted. We can potentially save
741*29c0dde8SVlastimil Babka 		 * a lot of iterations if we skip them at once. The check is
742*29c0dde8SVlastimil Babka 		 * racy, but we can consider only valid values and the only
743*29c0dde8SVlastimil Babka 		 * danger is skipping too much.
744bc835011SAndrea Arcangeli 		 */
745*29c0dde8SVlastimil Babka 		if (PageCompound(page)) {
746*29c0dde8SVlastimil Babka 			unsigned int comp_order = compound_order(page);
747*29c0dde8SVlastimil Babka 
748*29c0dde8SVlastimil Babka 			if (likely(comp_order < MAX_ORDER))
749*29c0dde8SVlastimil Babka 				low_pfn += (1UL << comp_order) - 1;
750edc2ca61SVlastimil Babka 
7512a1402aaSMel Gorman 			continue;
7522a1402aaSMel Gorman 		}
7532a1402aaSMel Gorman 
754*29c0dde8SVlastimil Babka 		if (!is_lru)
755*29c0dde8SVlastimil Babka 			continue;
756*29c0dde8SVlastimil Babka 
757119d6d59SDavid Rientjes 		/*
758119d6d59SDavid Rientjes 		 * Migration will fail if an anonymous page is pinned in memory,
759119d6d59SDavid Rientjes 		 * so avoid taking lru_lock and isolating it unnecessarily in an
760119d6d59SDavid Rientjes 		 * admittedly racy check.
761119d6d59SDavid Rientjes 		 */
762119d6d59SDavid Rientjes 		if (!page_mapping(page) &&
763119d6d59SDavid Rientjes 				page_count(page) > page_mapcount(page))
764119d6d59SDavid Rientjes 			continue;
765119d6d59SDavid Rientjes 
76669b7189fSVlastimil Babka 		/* If we already hold the lock, we can skip some rechecking */
76769b7189fSVlastimil Babka 		if (!locked) {
7688b44d279SVlastimil Babka 			locked = compact_trylock_irqsave(&zone->lru_lock,
7698b44d279SVlastimil Babka 								&flags, cc);
7708b44d279SVlastimil Babka 			if (!locked)
7712a1402aaSMel Gorman 				break;
7722a1402aaSMel Gorman 
773*29c0dde8SVlastimil Babka 			/* Recheck PageLRU and PageCompound under lock */
7742a1402aaSMel Gorman 			if (!PageLRU(page))
7752a1402aaSMel Gorman 				continue;
776*29c0dde8SVlastimil Babka 
777*29c0dde8SVlastimil Babka 			/*
778*29c0dde8SVlastimil Babka 			 * Page became compound since the non-locked check,
779*29c0dde8SVlastimil Babka 			 * and it's on LRU. It can only be a THP so the order
780*29c0dde8SVlastimil Babka 			 * is safe to read and it's 0 for tail pages.
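			 * For example, a THP head of the typical order 9
			 * advances low_pfn by 511 pages in one step.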
781*29c0dde8SVlastimil Babka 			 */
782*29c0dde8SVlastimil Babka 			if (unlikely(PageCompound(page))) {
783*29c0dde8SVlastimil Babka 				low_pfn += (1UL << compound_order(page)) - 1;
784bc835011SAndrea Arcangeli 				continue;
785bc835011SAndrea Arcangeli 			}
78669b7189fSVlastimil Babka 		}
787bc835011SAndrea Arcangeli 
788fa9add64SHugh Dickins 		lruvec = mem_cgroup_page_lruvec(page, zone);
789fa9add64SHugh Dickins 
790748446bbSMel Gorman 		/* Try to isolate the page */
791edc2ca61SVlastimil Babka 		if (__isolate_lru_page(page, isolate_mode) != 0)
792748446bbSMel Gorman 			continue;
793748446bbSMel Gorman 
794*29c0dde8SVlastimil Babka 		VM_BUG_ON_PAGE(PageCompound(page), page);
795bc835011SAndrea Arcangeli 
796748446bbSMel Gorman 		/* Successfully isolated */
797fa9add64SHugh Dickins 		del_page_from_lru_list(page, lruvec, page_lru(page));
798b6c75016SJoonsoo Kim 
799b6c75016SJoonsoo Kim isolate_success:
800748446bbSMel Gorman 		list_add(&page->lru, migratelist);
801748446bbSMel Gorman 		cc->nr_migratepages++;
802b7aba698SMel Gorman 		nr_isolated++;
803748446bbSMel Gorman 
804748446bbSMel Gorman 		/* Avoid isolating too much */
80531b8384aSHillf Danton 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
80631b8384aSHillf Danton 			++low_pfn;
807748446bbSMel Gorman 			break;
808748446bbSMel Gorman 		}
80931b8384aSHillf Danton 	}
810748446bbSMel Gorman 
81199c0fd5eSVlastimil Babka 	/*
81299c0fd5eSVlastimil Babka 	 * The PageBuddy() check could have potentially brought us outside
81399c0fd5eSVlastimil Babka 	 * the range to be scanned.
81499c0fd5eSVlastimil Babka 	 */
81599c0fd5eSVlastimil Babka 	if (unlikely(low_pfn > end_pfn))
81699c0fd5eSVlastimil Babka 		low_pfn = end_pfn;
81799c0fd5eSVlastimil Babka 
818c67fe375SMel Gorman 	if (locked)
819c67fe375SMel Gorman 		spin_unlock_irqrestore(&zone->lru_lock, flags);
820748446bbSMel Gorman 
82150b5b094SVlastimil Babka 	/*
82250b5b094SVlastimil Babka 	 * Update the pageblock-skip information and cached scanner pfn,
82350b5b094SVlastimil Babka 	 * if the whole pageblock was scanned without isolating any page.
82450b5b094SVlastimil Babka 	 */
82535979ef3SDavid Rientjes 	if (low_pfn == end_pfn)
826edc2ca61SVlastimil Babka 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
827bb13ffebSMel Gorman 
828e34d85f0SJoonsoo Kim 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
829e34d85f0SJoonsoo Kim 						nr_scanned, nr_isolated);
830b7aba698SMel Gorman 
831010fc29aSMinchan Kim 	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
832397487dbSMel Gorman 	if (nr_isolated)
833010fc29aSMinchan Kim 		count_compact_events(COMPACTISOLATED, nr_isolated);
834397487dbSMel Gorman 
8352fe86e00SMichal Nazarewicz 	return low_pfn;
8362fe86e00SMichal Nazarewicz }
8372fe86e00SMichal Nazarewicz 
838edc2ca61SVlastimil Babka /**
839edc2ca61SVlastimil Babka  * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
840edc2ca61SVlastimil Babka  * @cc:        Compaction control structure.
841edc2ca61SVlastimil Babka  * @start_pfn: The first PFN to start isolating.
842edc2ca61SVlastimil Babka  * @end_pfn:   The one-past-last PFN.
843edc2ca61SVlastimil Babka  *
844edc2ca61SVlastimil Babka  * Returns zero if isolation fails fatally due to e.g. a pending signal.
845edc2ca61SVlastimil Babka  * Otherwise, the function returns the one-past-the-last PFN of the isolated
846edc2ca61SVlastimil Babka  * pages (which may be greater than end_pfn if the end fell in the middle of a THP page).
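 *
 * This entry point serves CMA-style ranges and therefore passes
 * ISOLATE_UNEVICTABLE to isolate_migratepages_block() unconditionally,
 * unlike the compaction path, where scanning unevictable pages is a
 * sysctl decision (sysctl_compact_unevictable_allowed).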
847edc2ca61SVlastimil Babka  */
848edc2ca61SVlastimil Babka unsigned long
849edc2ca61SVlastimil Babka isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
850edc2ca61SVlastimil Babka 							unsigned long end_pfn)
851edc2ca61SVlastimil Babka {
852edc2ca61SVlastimil Babka 	unsigned long pfn, block_end_pfn;
853edc2ca61SVlastimil Babka 
854edc2ca61SVlastimil Babka 	/* Scan block by block. First and last block may be incomplete */
855edc2ca61SVlastimil Babka 	pfn = start_pfn;
856edc2ca61SVlastimil Babka 	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
857edc2ca61SVlastimil Babka 
858edc2ca61SVlastimil Babka 	for (; pfn < end_pfn; pfn = block_end_pfn,
859edc2ca61SVlastimil Babka 				block_end_pfn += pageblock_nr_pages) {
860edc2ca61SVlastimil Babka 
861edc2ca61SVlastimil Babka 		block_end_pfn = min(block_end_pfn, end_pfn);
862edc2ca61SVlastimil Babka 
8637d49d886SVlastimil Babka 		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
864edc2ca61SVlastimil Babka 			continue;
865edc2ca61SVlastimil Babka 
866edc2ca61SVlastimil Babka 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
867edc2ca61SVlastimil Babka 							ISOLATE_UNEVICTABLE);
868edc2ca61SVlastimil Babka 
869edc2ca61SVlastimil Babka 		/*
870edc2ca61SVlastimil Babka 		 * In case of fatal failure, release everything that might
871edc2ca61SVlastimil Babka 		 * have been isolated in the previous iteration, and signal
872edc2ca61SVlastimil Babka 		 * the failure back to the caller.
873edc2ca61SVlastimil Babka 		 */
874edc2ca61SVlastimil Babka 		if (!pfn) {
875edc2ca61SVlastimil Babka 			putback_movable_pages(&cc->migratepages);
876edc2ca61SVlastimil Babka 			cc->nr_migratepages = 0;
877edc2ca61SVlastimil Babka 			break;
878edc2ca61SVlastimil Babka 		}
8796ea41c0cSJoonsoo Kim 
8806ea41c0cSJoonsoo Kim 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
8816ea41c0cSJoonsoo Kim 			break;
882edc2ca61SVlastimil Babka 	}
883edc2ca61SVlastimil Babka 	acct_isolated(cc->zone, cc);
884edc2ca61SVlastimil Babka 
885edc2ca61SVlastimil Babka 	return pfn;
886edc2ca61SVlastimil Babka }
887edc2ca61SVlastimil Babka 
888ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION || CONFIG_CMA */
889ff9543fdSMichal Nazarewicz #ifdef CONFIG_COMPACTION
890018e9a49SAndrew Morton 
891018e9a49SAndrew Morton /* Returns true if the page is within a block suitable for migration to */
892018e9a49SAndrew Morton static bool suitable_migration_target(struct page *page)
893018e9a49SAndrew Morton {
894018e9a49SAndrew Morton 	/* If the page is a large free page, then disallow migration */
895018e9a49SAndrew Morton 	if (PageBuddy(page)) {
896018e9a49SAndrew Morton 		/*
897018e9a49SAndrew Morton 		 * We are checking page_order without zone->lock taken. But
898018e9a49SAndrew Morton 		 * the only small danger is that we skip a potentially suitable
899018e9a49SAndrew Morton 		 * pageblock, so it's not worth checking the order for a valid range.
900018e9a49SAndrew Morton 		 */
901018e9a49SAndrew Morton 		if (page_order_unsafe(page) >= pageblock_order)
902018e9a49SAndrew Morton 			return false;
903018e9a49SAndrew Morton 	}
904018e9a49SAndrew Morton 
905018e9a49SAndrew Morton 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
906018e9a49SAndrew Morton 	if (migrate_async_suitable(get_pageblock_migratetype(page)))
907018e9a49SAndrew Morton 		return true;
908018e9a49SAndrew Morton 
909018e9a49SAndrew Morton 	/* Otherwise skip the block */
910018e9a49SAndrew Morton 	return false;
911018e9a49SAndrew Morton }
912018e9a49SAndrew Morton 
913ff9543fdSMichal Nazarewicz /*
914f2849aa0SVlastimil Babka  * Test whether the free scanner has reached the same or lower pageblock than
915f2849aa0SVlastimil Babka  * the migration scanner, and compaction should thus terminate.
916f2849aa0SVlastimil Babka  */
917f2849aa0SVlastimil Babka static inline bool compact_scanners_met(struct compact_control *cc)
918f2849aa0SVlastimil Babka {
919f2849aa0SVlastimil Babka 	return (cc->free_pfn >> pageblock_order)
920f2849aa0SVlastimil Babka 		<= (cc->migrate_pfn >> pageblock_order);
921f2849aa0SVlastimil Babka }
922f2849aa0SVlastimil Babka 
923f2849aa0SVlastimil Babka /*
924ff9543fdSMichal Nazarewicz  * Based on information in the current compact_control, find blocks
925ff9543fdSMichal Nazarewicz  * suitable for isolating free pages from and then isolate them.
926ff9543fdSMichal Nazarewicz  */
927edc2ca61SVlastimil Babka static void isolate_freepages(struct compact_control *cc)
928ff9543fdSMichal Nazarewicz {
929edc2ca61SVlastimil Babka 	struct zone *zone = cc->zone;
930ff9543fdSMichal Nazarewicz 	struct page *page;
931c96b9e50SVlastimil Babka 	unsigned long block_start_pfn;	/* start of current pageblock */
932e14c720eSVlastimil Babka 	unsigned long isolate_start_pfn; /* exact pfn we start at */
933c96b9e50SVlastimil Babka 	unsigned long block_end_pfn;	/* end of current pageblock */
934c96b9e50SVlastimil Babka 	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
935ff9543fdSMichal Nazarewicz 	struct list_head *freelist = &cc->freepages;
9362fe86e00SMichal Nazarewicz 
937ff9543fdSMichal Nazarewicz 	/*
938ff9543fdSMichal Nazarewicz 	 * Initialise the free scanner. The starting point is where we last
93949e068f0SVlastimil Babka 	 * successfully isolated from, zone-cached value, or the end of the
940e14c720eSVlastimil Babka 	 * zone when isolating for the first time. For looping we also need
941e14c720eSVlastimil Babka 	 * this pfn aligned down to the pageblock boundary, because we do
942c96b9e50SVlastimil Babka 	 * block_start_pfn -= pageblock_nr_pages in the for loop.
943c96b9e50SVlastimil Babka 	 * For the ending point, take care when isolating in the last pageblock
944c96b9e50SVlastimil Babka 	 * of a zone which ends in the middle of a pageblock.
945c96b9e50SVlastimil Babka 	 * The low boundary is the end of the pageblock the migration scanner
94649e068f0SVlastimil Babka 	 * is using.
947ff9543fdSMichal Nazarewicz 	 */
948e14c720eSVlastimil Babka 	isolate_start_pfn = cc->free_pfn;
949c96b9e50SVlastimil Babka 	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
950c96b9e50SVlastimil Babka 	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
951c96b9e50SVlastimil Babka 						zone_end_pfn(zone));
9527ed695e0SVlastimil Babka 	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
9532fe86e00SMichal Nazarewicz 
954ff9543fdSMichal Nazarewicz 	/*
955ff9543fdSMichal Nazarewicz 	 * Isolate free pages until enough are available to migrate the
956ff9543fdSMichal Nazarewicz 	 * pages on cc->migratepages.
We stop searching if the migrate 957ff9543fdSMichal Nazarewicz * and free page scanners meet or enough free pages are isolated. 958ff9543fdSMichal Nazarewicz */ 959f5f61a32SVlastimil Babka for (; block_start_pfn >= low_pfn; 960c96b9e50SVlastimil Babka block_end_pfn = block_start_pfn, 961e14c720eSVlastimil Babka block_start_pfn -= pageblock_nr_pages, 962e14c720eSVlastimil Babka isolate_start_pfn = block_start_pfn) { 963ff9543fdSMichal Nazarewicz 964f6ea3adbSDavid Rientjes /* 965f6ea3adbSDavid Rientjes * This can iterate a massively long zone without finding any 966f6ea3adbSDavid Rientjes * suitable migration targets, so periodically check if we need 967be976572SVlastimil Babka * to schedule, or even abort async compaction. 968f6ea3adbSDavid Rientjes */ 969be976572SVlastimil Babka if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 970be976572SVlastimil Babka && compact_should_abort(cc)) 971be976572SVlastimil Babka break; 972f6ea3adbSDavid Rientjes 9737d49d886SVlastimil Babka page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, 9747d49d886SVlastimil Babka zone); 9757d49d886SVlastimil Babka if (!page) 976ff9543fdSMichal Nazarewicz continue; 977ff9543fdSMichal Nazarewicz 978ff9543fdSMichal Nazarewicz /* Check the block is suitable for migration */ 97968e3e926SLinus Torvalds if (!suitable_migration_target(page)) 980ff9543fdSMichal Nazarewicz continue; 98168e3e926SLinus Torvalds 982bb13ffebSMel Gorman /* If isolation recently failed, do not retry */ 983bb13ffebSMel Gorman if (!isolation_suitable(cc, page)) 984bb13ffebSMel Gorman continue; 985bb13ffebSMel Gorman 986e14c720eSVlastimil Babka /* Found a block suitable for isolating free pages from. */ 987932ff6bbSJoonsoo Kim isolate_freepages_block(cc, &isolate_start_pfn, 988c96b9e50SVlastimil Babka block_end_pfn, freelist, false); 989ff9543fdSMichal Nazarewicz 990ff9543fdSMichal Nazarewicz /* 991f5f61a32SVlastimil Babka * If we isolated enough freepages, or aborted due to async 992f5f61a32SVlastimil Babka * compaction being contended, terminate the loop. 993e14c720eSVlastimil Babka * Remember where the free scanner should restart next time, 994e14c720eSVlastimil Babka * which is where isolate_freepages_block() left off. 995e14c720eSVlastimil Babka * But if it scanned the whole pageblock, isolate_start_pfn 996e14c720eSVlastimil Babka * now points at block_end_pfn, which is the start of the next 997e14c720eSVlastimil Babka * pageblock. 998e14c720eSVlastimil Babka * In that case we will however want to restart at the start 999e14c720eSVlastimil Babka * of the previous pageblock. 
1000e14c720eSVlastimil Babka */ 1001f5f61a32SVlastimil Babka if ((cc->nr_freepages >= cc->nr_migratepages) 1002f5f61a32SVlastimil Babka || cc->contended) { 1003f5f61a32SVlastimil Babka if (isolate_start_pfn >= block_end_pfn) 1004f5f61a32SVlastimil Babka isolate_start_pfn = 1005e14c720eSVlastimil Babka block_start_pfn - pageblock_nr_pages; 1006be976572SVlastimil Babka break; 1007f5f61a32SVlastimil Babka } else { 1008f5f61a32SVlastimil Babka /* 1009f5f61a32SVlastimil Babka * isolate_freepages_block() should not terminate 1010f5f61a32SVlastimil Babka * prematurely unless contended, or isolated enough 1011f5f61a32SVlastimil Babka */ 1012f5f61a32SVlastimil Babka VM_BUG_ON(isolate_start_pfn < block_end_pfn); 1013f5f61a32SVlastimil Babka } 1014c89511abSMel Gorman } 1015ff9543fdSMichal Nazarewicz 1016ff9543fdSMichal Nazarewicz /* split_free_page does not map the pages */ 1017ff9543fdSMichal Nazarewicz map_pages(freelist); 1018ff9543fdSMichal Nazarewicz 10197ed695e0SVlastimil Babka /* 1020f5f61a32SVlastimil Babka * Record where the free scanner will restart next time. Either we 1021f5f61a32SVlastimil Babka * broke from the loop and set isolate_start_pfn based on the last 1022f5f61a32SVlastimil Babka * call to isolate_freepages_block(), or we met the migration scanner 1023f5f61a32SVlastimil Babka * and the loop terminated due to isolate_start_pfn < low_pfn 10247ed695e0SVlastimil Babka */ 1025f5f61a32SVlastimil Babka cc->free_pfn = isolate_start_pfn; 1026748446bbSMel Gorman } 1027748446bbSMel Gorman 1028748446bbSMel Gorman /* 1029748446bbSMel Gorman * This is a migrate-callback that "allocates" freepages by taking pages 1030748446bbSMel Gorman * from the isolated freelists in the block we are migrating to. 1031748446bbSMel Gorman */ 1032748446bbSMel Gorman static struct page *compaction_alloc(struct page *migratepage, 1033748446bbSMel Gorman unsigned long data, 1034748446bbSMel Gorman int **result) 1035748446bbSMel Gorman { 1036748446bbSMel Gorman struct compact_control *cc = (struct compact_control *)data; 1037748446bbSMel Gorman struct page *freepage; 1038748446bbSMel Gorman 1039be976572SVlastimil Babka /* 1040be976572SVlastimil Babka * Isolate free pages if necessary, and if we are not aborting due to 1041be976572SVlastimil Babka * contention. 1042be976572SVlastimil Babka */ 1043748446bbSMel Gorman if (list_empty(&cc->freepages)) { 1044be976572SVlastimil Babka if (!cc->contended) 1045edc2ca61SVlastimil Babka isolate_freepages(cc); 1046748446bbSMel Gorman 1047748446bbSMel Gorman if (list_empty(&cc->freepages)) 1048748446bbSMel Gorman return NULL; 1049748446bbSMel Gorman } 1050748446bbSMel Gorman 1051748446bbSMel Gorman freepage = list_entry(cc->freepages.next, struct page, lru); 1052748446bbSMel Gorman list_del(&freepage->lru); 1053748446bbSMel Gorman cc->nr_freepages--; 1054748446bbSMel Gorman 1055748446bbSMel Gorman return freepage; 1056748446bbSMel Gorman } 1057748446bbSMel Gorman 1058748446bbSMel Gorman /* 1059d53aea3dSDavid Rientjes * This is a migrate-callback that "frees" freepages back to the isolated 1060d53aea3dSDavid Rientjes * freelist. All pages on the freelist are from the same zone, so there is no 1061d53aea3dSDavid Rientjes * special handling needed for NUMA. 
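 * Both callbacks cast the private data back to the compact_control;
 * compact_zone() wires them into the migration loop roughly as
 *
 *	migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);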
1062d53aea3dSDavid Rientjes */ 1063d53aea3dSDavid Rientjes static void compaction_free(struct page *page, unsigned long data) 1064d53aea3dSDavid Rientjes { 1065d53aea3dSDavid Rientjes struct compact_control *cc = (struct compact_control *)data; 1066d53aea3dSDavid Rientjes 1067d53aea3dSDavid Rientjes list_add(&page->lru, &cc->freepages); 1068d53aea3dSDavid Rientjes cc->nr_freepages++; 1069d53aea3dSDavid Rientjes } 1070d53aea3dSDavid Rientjes 1071ff9543fdSMichal Nazarewicz /* possible outcome of isolate_migratepages */ 1072ff9543fdSMichal Nazarewicz typedef enum { 1073ff9543fdSMichal Nazarewicz ISOLATE_ABORT, /* Abort compaction now */ 1074ff9543fdSMichal Nazarewicz ISOLATE_NONE, /* No pages isolated, continue scanning */ 1075ff9543fdSMichal Nazarewicz ISOLATE_SUCCESS, /* Pages isolated, migrate */ 1076ff9543fdSMichal Nazarewicz } isolate_migrate_t; 1077ff9543fdSMichal Nazarewicz 1078ff9543fdSMichal Nazarewicz /* 10795bbe3547SEric B Munson * Allow userspace to control policy on scanning the unevictable LRU for 10805bbe3547SEric B Munson * compactable pages. 10815bbe3547SEric B Munson */ 10825bbe3547SEric B Munson int sysctl_compact_unevictable_allowed __read_mostly = 1; 10835bbe3547SEric B Munson 10845bbe3547SEric B Munson /* 1085edc2ca61SVlastimil Babka * Isolate all pages that can be migrated from the first suitable block, 1086edc2ca61SVlastimil Babka * starting at the block pointed to by the migrate scanner pfn within 1087edc2ca61SVlastimil Babka * compact_control. 1088ff9543fdSMichal Nazarewicz */ 1089ff9543fdSMichal Nazarewicz static isolate_migrate_t isolate_migratepages(struct zone *zone, 1090ff9543fdSMichal Nazarewicz struct compact_control *cc) 1091ff9543fdSMichal Nazarewicz { 1092ff9543fdSMichal Nazarewicz unsigned long low_pfn, end_pfn; 1093edc2ca61SVlastimil Babka struct page *page; 1094edc2ca61SVlastimil Babka const isolate_mode_t isolate_mode = 10955bbe3547SEric B Munson (sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) | 1096edc2ca61SVlastimil Babka (cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0); 1097ff9543fdSMichal Nazarewicz 1098edc2ca61SVlastimil Babka /* 1099edc2ca61SVlastimil Babka * Start at where we last stopped, or beginning of the zone as 1100edc2ca61SVlastimil Babka * initialized by compact_zone() 1101edc2ca61SVlastimil Babka */ 1102edc2ca61SVlastimil Babka low_pfn = cc->migrate_pfn; 1103ff9543fdSMichal Nazarewicz 1104ff9543fdSMichal Nazarewicz /* Only scan within a pageblock boundary */ 1105a9aacbccSMel Gorman end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages); 1106ff9543fdSMichal Nazarewicz 1107edc2ca61SVlastimil Babka /* 1108edc2ca61SVlastimil Babka * Iterate over whole pageblocks until we find the first suitable. 1109edc2ca61SVlastimil Babka * Do not cross the free scanner. 1110edc2ca61SVlastimil Babka */ 1111edc2ca61SVlastimil Babka for (; end_pfn <= cc->free_pfn; 1112edc2ca61SVlastimil Babka low_pfn = end_pfn, end_pfn += pageblock_nr_pages) { 1113edc2ca61SVlastimil Babka 1114edc2ca61SVlastimil Babka /* 1115edc2ca61SVlastimil Babka * This can potentially iterate a massively long zone with 1116edc2ca61SVlastimil Babka * many pageblocks unsuitable, so periodically check if we 1117edc2ca61SVlastimil Babka * need to schedule, or even abort async compaction. 
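		 * With the usual SWAP_CLUSTER_MAX of 32 and pageblock
		 * order 9, this fires every 32 * 512 == 16384 pfns, i.e.
		 * every 64MB with 4KB pages.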
1118edc2ca61SVlastimil Babka */ 1119edc2ca61SVlastimil Babka if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) 1120edc2ca61SVlastimil Babka && compact_should_abort(cc)) 1121edc2ca61SVlastimil Babka break; 1122edc2ca61SVlastimil Babka 11237d49d886SVlastimil Babka page = pageblock_pfn_to_page(low_pfn, end_pfn, zone); 11247d49d886SVlastimil Babka if (!page) 1125edc2ca61SVlastimil Babka continue; 1126edc2ca61SVlastimil Babka 1127edc2ca61SVlastimil Babka /* If isolation recently failed, do not retry */ 1128edc2ca61SVlastimil Babka if (!isolation_suitable(cc, page)) 1129edc2ca61SVlastimil Babka continue; 1130edc2ca61SVlastimil Babka 1131edc2ca61SVlastimil Babka /* 1132edc2ca61SVlastimil Babka * For async compaction, also only scan in MOVABLE blocks. 1133edc2ca61SVlastimil Babka * Async compaction is optimistic and checks whether the minimum 1134edc2ca61SVlastimil Babka * amount of work satisfies the allocation. 1135edc2ca61SVlastimil Babka */ 1136edc2ca61SVlastimil Babka if (cc->mode == MIGRATE_ASYNC && 1137edc2ca61SVlastimil Babka !migrate_async_suitable(get_pageblock_migratetype(page))) 1138edc2ca61SVlastimil Babka continue; 1139ff9543fdSMichal Nazarewicz 1140ff9543fdSMichal Nazarewicz /* Perform the isolation */ 1141edc2ca61SVlastimil Babka low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn, 1142edc2ca61SVlastimil Babka isolate_mode); 1143edc2ca61SVlastimil Babka 1144ff59909aSHugh Dickins if (!low_pfn || cc->contended) { 1145ff59909aSHugh Dickins acct_isolated(zone, cc); 1146ff9543fdSMichal Nazarewicz return ISOLATE_ABORT; 1147ff59909aSHugh Dickins } 1148ff9543fdSMichal Nazarewicz 1149edc2ca61SVlastimil Babka /* 1150edc2ca61SVlastimil Babka * Either we isolated something and can proceed with migration, or 1151edc2ca61SVlastimil Babka * we failed and compact_zone() should decide whether we should 1152edc2ca61SVlastimil Babka * continue or not. 1153edc2ca61SVlastimil Babka */ 1154edc2ca61SVlastimil Babka break; 1155edc2ca61SVlastimil Babka } 1156edc2ca61SVlastimil Babka 1157edc2ca61SVlastimil Babka acct_isolated(zone, cc); 1158f2849aa0SVlastimil Babka /* Record where migration scanner will be restarted. */ 1159f2849aa0SVlastimil Babka cc->migrate_pfn = low_pfn; 1160ff9543fdSMichal Nazarewicz 1161edc2ca61SVlastimil Babka return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1162ff9543fdSMichal Nazarewicz } 1163ff9543fdSMichal Nazarewicz 1164837d026dSJoonsoo Kim static int __compact_finished(struct zone *zone, struct compact_control *cc, 11656d7ce559SDavid Rientjes const int migratetype) 1166748446bbSMel Gorman { 11678fb74b9fSMel Gorman unsigned int order; 11685a03b051SAndrea Arcangeli unsigned long watermark; 116956de7263SMel Gorman 1170be976572SVlastimil Babka if (cc->contended || fatal_signal_pending(current)) 1171748446bbSMel Gorman return COMPACT_PARTIAL; 1172748446bbSMel Gorman 1173753341a4SMel Gorman /* Compaction run completes if the migrate and free scanner meet */ 1174f2849aa0SVlastimil Babka if (compact_scanners_met(cc)) { 117555b7c4c9SVlastimil Babka /* Let the next compaction start anew. */ 117602333641SVlastimil Babka reset_cached_positions(zone); 117755b7c4c9SVlastimil Babka 117862997027SMel Gorman /* 117962997027SMel Gorman * Mark that the PG_migrate_skip information should be cleared 118062997027SMel Gorman * by kswapd when it goes to sleep. kswapd does not set the 118162997027SMel Gorman * flag itself, as the decision to clear it should be based 118262997027SMel Gorman * directly on an allocation request.
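 *
 * A minimal sketch of the consumer side, assuming the behaviour of
 * reset_isolation_suitable() elsewhere in this file (an approximation
 * for illustration, not the exact implementation): per populated zone,
 *
 *	if (zone->compact_blockskip_flush)
 *		__reset_isolation_suitable(zone);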
118362997027SMel Gorman */ 118462997027SMel Gorman if (!current_is_kswapd()) 118562997027SMel Gorman zone->compact_blockskip_flush = true; 118662997027SMel Gorman 1187748446bbSMel Gorman return COMPACT_COMPLETE; 1188bb13ffebSMel Gorman } 1189748446bbSMel Gorman 119082478fb7SJohannes Weiner /* 119182478fb7SJohannes Weiner * order == -1 is expected when compacting via 119282478fb7SJohannes Weiner * /proc/sys/vm/compact_memory 119382478fb7SJohannes Weiner */ 119456de7263SMel Gorman if (cc->order == -1) 119556de7263SMel Gorman return COMPACT_CONTINUE; 119656de7263SMel Gorman 11973957c776SMichal Hocko /* Compaction run is not finished if the watermark is not met */ 11983957c776SMichal Hocko watermark = low_wmark_pages(zone); 11993957c776SMichal Hocko 1200ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, 1201ebff3980SVlastimil Babka cc->alloc_flags)) 12023957c776SMichal Hocko return COMPACT_CONTINUE; 12033957c776SMichal Hocko 120456de7263SMel Gorman /* Direct compactor: Is a suitable page free? */ 120556de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) { 12068fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order]; 12072149cdaeSJoonsoo Kim bool can_steal; 12088fb74b9fSMel Gorman 120956de7263SMel Gorman /* Job done if page is free of the right migratetype */ 12106d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype])) 121156de7263SMel Gorman return COMPACT_PARTIAL; 121256de7263SMel Gorman 12132149cdaeSJoonsoo Kim #ifdef CONFIG_CMA 12142149cdaeSJoonsoo Kim /* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */ 12152149cdaeSJoonsoo Kim if (migratetype == MIGRATE_MOVABLE && 12162149cdaeSJoonsoo Kim !list_empty(&area->free_list[MIGRATE_CMA])) 12172149cdaeSJoonsoo Kim return COMPACT_PARTIAL; 12182149cdaeSJoonsoo Kim #endif 12192149cdaeSJoonsoo Kim /* 12202149cdaeSJoonsoo Kim * Job done if allocation would steal freepages from 12212149cdaeSJoonsoo Kim * other migratetype buddy lists. 12222149cdaeSJoonsoo Kim */ 12232149cdaeSJoonsoo Kim if (find_suitable_fallback(area, order, migratetype, 12242149cdaeSJoonsoo Kim true, &can_steal) != -1) 122556de7263SMel Gorman return COMPACT_PARTIAL; 122656de7263SMel Gorman } 122756de7263SMel Gorman 1228837d026dSJoonsoo Kim return COMPACT_NO_SUITABLE_PAGE; 1229837d026dSJoonsoo Kim } 1230837d026dSJoonsoo Kim 1231837d026dSJoonsoo Kim static int compact_finished(struct zone *zone, struct compact_control *cc, 1232837d026dSJoonsoo Kim const int migratetype) 1233837d026dSJoonsoo Kim { 1234837d026dSJoonsoo Kim int ret; 1235837d026dSJoonsoo Kim 1236837d026dSJoonsoo Kim ret = __compact_finished(zone, cc, migratetype); 1237837d026dSJoonsoo Kim trace_mm_compaction_finished(zone, cc->order, ret); 1238837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE) 1239837d026dSJoonsoo Kim ret = COMPACT_CONTINUE; 1240837d026dSJoonsoo Kim 1241837d026dSJoonsoo Kim return ret; 1242748446bbSMel Gorman } 1243748446bbSMel Gorman 12443e7d3449SMel Gorman /* 12453e7d3449SMel Gorman * compaction_suitable: Is this suitable to run compaction on this zone now? 
12463e7d3449SMel Gorman * Returns 12473e7d3449SMel Gorman * COMPACT_SKIPPED - If there are too few free pages for compaction 12483e7d3449SMel Gorman * COMPACT_PARTIAL - If the allocation would succeed without compaction 12493e7d3449SMel Gorman * COMPACT_CONTINUE - If compaction should run now 12503e7d3449SMel Gorman */ 1251837d026dSJoonsoo Kim static unsigned long __compaction_suitable(struct zone *zone, int order, 1252ebff3980SVlastimil Babka int alloc_flags, int classzone_idx) 12533e7d3449SMel Gorman { 12543e7d3449SMel Gorman int fragindex; 12553e7d3449SMel Gorman unsigned long watermark; 12563e7d3449SMel Gorman 12573e7d3449SMel Gorman /* 12583957c776SMichal Hocko * order == -1 is expected when compacting via 12593957c776SMichal Hocko * /proc/sys/vm/compact_memory 12603957c776SMichal Hocko */ 12613957c776SMichal Hocko if (order == -1) 12623957c776SMichal Hocko return COMPACT_CONTINUE; 12633957c776SMichal Hocko 1264ebff3980SVlastimil Babka watermark = low_wmark_pages(zone); 1265ebff3980SVlastimil Babka /* 1266ebff3980SVlastimil Babka * If watermarks for high-order allocation are already met, there 1267ebff3980SVlastimil Babka * should be no need for compaction at all. 1268ebff3980SVlastimil Babka */ 1269ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx, 1270ebff3980SVlastimil Babka alloc_flags)) 1271ebff3980SVlastimil Babka return COMPACT_PARTIAL; 1272ebff3980SVlastimil Babka 12733957c776SMichal Hocko /* 12743e7d3449SMel Gorman * Watermarks for order-0 must be met for compaction. Note the 2UL. 12753e7d3449SMel Gorman * This is because during migration, copies of pages need to be 12763e7d3449SMel Gorman * allocated and for a short time, the footprint is higher 12773e7d3449SMel Gorman */ 1278ebff3980SVlastimil Babka watermark += (2UL << order); 1279ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags)) 12803e7d3449SMel Gorman return COMPACT_SKIPPED; 12813e7d3449SMel Gorman 12823e7d3449SMel Gorman /* 12833e7d3449SMel Gorman * fragmentation index determines if allocation failures are due to 12843e7d3449SMel Gorman * low memory or external fragmentation 12853e7d3449SMel Gorman * 1286ebff3980SVlastimil Babka * index of -1000 would imply allocations might succeed depending on 1287ebff3980SVlastimil Babka * watermarks, but we already failed the high-order watermark check 12883e7d3449SMel Gorman * index towards 0 implies failure is due to lack of memory 12893e7d3449SMel Gorman * index towards 1000 implies failure is due to fragmentation 12903e7d3449SMel Gorman * 12913e7d3449SMel Gorman * Only compact if a failure would be due to fragmentation. 
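 *
 * Worked example with the default sysctl_extfrag_threshold of 500
 * (set further down in this file): a fragindex of 400 suggests the
 * failure is memory-related, so the zone is reported unsuitable
 * (mapped to COMPACT_SKIPPED by compaction_suitable() below), while a
 * fragindex of 700 points at fragmentation and compaction continues.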
12923e7d3449SMel Gorman */ 12933e7d3449SMel Gorman fragindex = fragmentation_index(zone, order); 12943e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 1295837d026dSJoonsoo Kim return COMPACT_NOT_SUITABLE_ZONE; 12963e7d3449SMel Gorman 12973e7d3449SMel Gorman return COMPACT_CONTINUE; 12983e7d3449SMel Gorman } 12993e7d3449SMel Gorman 1300837d026dSJoonsoo Kim unsigned long compaction_suitable(struct zone *zone, int order, 1301837d026dSJoonsoo Kim int alloc_flags, int classzone_idx) 1302837d026dSJoonsoo Kim { 1303837d026dSJoonsoo Kim unsigned long ret; 1304837d026dSJoonsoo Kim 1305837d026dSJoonsoo Kim ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx); 1306837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret); 1307837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE) 1308837d026dSJoonsoo Kim ret = COMPACT_SKIPPED; 1309837d026dSJoonsoo Kim 1310837d026dSJoonsoo Kim return ret; 1311837d026dSJoonsoo Kim } 1312837d026dSJoonsoo Kim 1313748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc) 1314748446bbSMel Gorman { 1315748446bbSMel Gorman int ret; 1316c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn; 1317108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone); 13186d7ce559SDavid Rientjes const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); 1319e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC; 1320fdaf7f5cSVlastimil Babka unsigned long last_migrated_pfn = 0; 1321748446bbSMel Gorman 1322ebff3980SVlastimil Babka ret = compaction_suitable(zone, cc->order, cc->alloc_flags, 1323ebff3980SVlastimil Babka cc->classzone_idx); 13243e7d3449SMel Gorman switch (ret) { 13253e7d3449SMel Gorman case COMPACT_PARTIAL: 13263e7d3449SMel Gorman case COMPACT_SKIPPED: 13273e7d3449SMel Gorman /* Compaction is likely to fail */ 13283e7d3449SMel Gorman return ret; 13293e7d3449SMel Gorman case COMPACT_CONTINUE: 13303e7d3449SMel Gorman /* Fall through to compaction */ 13313e7d3449SMel Gorman ; 13323e7d3449SMel Gorman } 13333e7d3449SMel Gorman 1334c89511abSMel Gorman /* 1335d3132e4bSVlastimil Babka * Clear pageblock skip if there were failures recently and compaction 1336d3132e4bSVlastimil Babka * is about to be retried after being deferred. kswapd does not do 1337d3132e4bSVlastimil Babka * this reset as it'll reset the cached information when going to sleep. 1338d3132e4bSVlastimil Babka */ 1339d3132e4bSVlastimil Babka if (compaction_restarting(zone, cc->order) && !current_is_kswapd()) 1340d3132e4bSVlastimil Babka __reset_isolation_suitable(zone); 1341d3132e4bSVlastimil Babka 1342d3132e4bSVlastimil Babka /* 1343c89511abSMel Gorman * Set up to move all movable pages to the end of the zone. Use cached 1344c89511abSMel Gorman * information on where the scanners should start, but check that it 1345c89511abSMel Gorman * is initialised by ensuring the values are within zone boundaries.
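 *
 * For example, assuming pageblock_nr_pages == 512: a stale
 * cc->free_pfn outside [start_pfn, end_pfn] is re-initialised to
 * end_pfn & ~(pageblock_nr_pages - 1), i.e. rounded down to the
 * start of the zone's last pageblock, while a stale cc->migrate_pfn
 * falls back to start_pfn at the very beginning of the zone.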
1346c89511abSMel Gorman */ 1347e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; 1348c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn; 1349c89511abSMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { 1350c89511abSMel Gorman cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); 1351c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn; 1352c89511abSMel Gorman } 1353c89511abSMel Gorman if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) { 1354c89511abSMel Gorman cc->migrate_pfn = start_pfn; 135535979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 135635979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 1357c89511abSMel Gorman } 1358748446bbSMel Gorman 135916c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, 136016c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync); 13610eb927c0SMel Gorman 1362748446bbSMel Gorman migrate_prep_local(); 1363748446bbSMel Gorman 13646d7ce559SDavid Rientjes while ((ret = compact_finished(zone, cc, migratetype)) == 13656d7ce559SDavid Rientjes COMPACT_CONTINUE) { 13669d502c1cSMinchan Kim int err; 1367fdaf7f5cSVlastimil Babka unsigned long isolate_start_pfn = cc->migrate_pfn; 1368748446bbSMel Gorman 1369f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) { 1370f9e35b3bSMel Gorman case ISOLATE_ABORT: 1371f9e35b3bSMel Gorman ret = COMPACT_PARTIAL; 13725733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 1373e64c5237SShaohua Li cc->nr_migratepages = 0; 1374f9e35b3bSMel Gorman goto out; 1375f9e35b3bSMel Gorman case ISOLATE_NONE: 1376fdaf7f5cSVlastimil Babka /* 1377fdaf7f5cSVlastimil Babka * We haven't isolated and migrated anything, but 1378fdaf7f5cSVlastimil Babka * there might still be unflushed migrations from 1379fdaf7f5cSVlastimil Babka * previous cc->order aligned block. 1380fdaf7f5cSVlastimil Babka */ 1381fdaf7f5cSVlastimil Babka goto check_drain; 1382f9e35b3bSMel Gorman case ISOLATE_SUCCESS: 1383f9e35b3bSMel Gorman ; 1384f9e35b3bSMel Gorman } 1385748446bbSMel Gorman 1386d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc, 1387e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode, 13887b2a2d4aSMel Gorman MR_COMPACTION); 1389748446bbSMel Gorman 1390f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err, 1391f8c9301fSVlastimil Babka &cc->migratepages); 1392748446bbSMel Gorman 1393f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */ 1394f8c9301fSVlastimil Babka cc->nr_migratepages = 0; 13959d502c1cSMinchan Kim if (err) { 13965733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages); 13977ed695e0SVlastimil Babka /* 13987ed695e0SVlastimil Babka * migrate_pages() may return -ENOMEM when scanners meet 13997ed695e0SVlastimil Babka * and we want compact_finished() to detect it 14007ed695e0SVlastimil Babka */ 1401f2849aa0SVlastimil Babka if (err == -ENOMEM && !compact_scanners_met(cc)) { 14024bf2bba3SDavid Rientjes ret = COMPACT_PARTIAL; 14034bf2bba3SDavid Rientjes goto out; 1404748446bbSMel Gorman } 14054bf2bba3SDavid Rientjes } 1406fdaf7f5cSVlastimil Babka 1407fdaf7f5cSVlastimil Babka /* 1408fdaf7f5cSVlastimil Babka * Record where we could have freed pages by migration and not 1409fdaf7f5cSVlastimil Babka * yet flushed them to buddy allocator. 
We use the pfn that 1410fdaf7f5cSVlastimil Babka * isolate_migratepages() started from in this loop iteration 1411fdaf7f5cSVlastimil Babka * - this is the lowest page that could have been isolated and 1412fdaf7f5cSVlastimil Babka * then freed by migration. 1413fdaf7f5cSVlastimil Babka */ 1414fdaf7f5cSVlastimil Babka if (!last_migrated_pfn) 1415fdaf7f5cSVlastimil Babka last_migrated_pfn = isolate_start_pfn; 1416fdaf7f5cSVlastimil Babka 1417fdaf7f5cSVlastimil Babka check_drain: 1418fdaf7f5cSVlastimil Babka /* 1419fdaf7f5cSVlastimil Babka * Has the migration scanner moved away from the previous 1420fdaf7f5cSVlastimil Babka * cc->order aligned block where we migrated from? If yes, 1421fdaf7f5cSVlastimil Babka * flush the pages that were freed, so that they can merge and 1422fdaf7f5cSVlastimil Babka * compact_finished() can detect immediately if allocation 1423fdaf7f5cSVlastimil Babka * would succeed. 1424fdaf7f5cSVlastimil Babka */ 1425fdaf7f5cSVlastimil Babka if (cc->order > 0 && last_migrated_pfn) { 1426fdaf7f5cSVlastimil Babka int cpu; 1427fdaf7f5cSVlastimil Babka unsigned long current_block_start = 1428fdaf7f5cSVlastimil Babka cc->migrate_pfn & ~((1UL << cc->order) - 1); 1429fdaf7f5cSVlastimil Babka 1430fdaf7f5cSVlastimil Babka if (last_migrated_pfn < current_block_start) { 1431fdaf7f5cSVlastimil Babka cpu = get_cpu(); 1432fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu); 1433fdaf7f5cSVlastimil Babka drain_local_pages(zone); 1434fdaf7f5cSVlastimil Babka put_cpu(); 1435fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */ 1436fdaf7f5cSVlastimil Babka last_migrated_pfn = 0; 1437fdaf7f5cSVlastimil Babka } 1438fdaf7f5cSVlastimil Babka } 1439fdaf7f5cSVlastimil Babka 1440748446bbSMel Gorman } 1441748446bbSMel Gorman 1442f9e35b3bSMel Gorman out: 14436bace090SVlastimil Babka /* 14446bace090SVlastimil Babka * Release free pages and update where the free scanner should restart, 14456bace090SVlastimil Babka * so we don't leave any returned pages behind in the next attempt. 14466bace090SVlastimil Babka */ 14476bace090SVlastimil Babka if (cc->nr_freepages > 0) { 14486bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages); 14496bace090SVlastimil Babka 14506bace090SVlastimil Babka cc->nr_freepages = 0; 14516bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0); 14526bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */ 14536bace090SVlastimil Babka free_pfn &= ~(pageblock_nr_pages-1); 14546bace090SVlastimil Babka /* 14556bace090SVlastimil Babka * Only go back, not forward. 
The cached pfn might have been 14566bace090SVlastimil Babka * already reset to zone end in compact_finished() 14576bace090SVlastimil Babka */ 14586bace090SVlastimil Babka if (free_pfn > zone->compact_cached_free_pfn) 14596bace090SVlastimil Babka zone->compact_cached_free_pfn = free_pfn; 14606bace090SVlastimil Babka } 1461748446bbSMel Gorman 146216c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn, 146316c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret); 14640eb927c0SMel Gorman 1465748446bbSMel Gorman return ret; 1466748446bbSMel Gorman } 146776ab0f53SMel Gorman 1468e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order, 1469ebff3980SVlastimil Babka gfp_t gfp_mask, enum migrate_mode mode, int *contended, 1470ebff3980SVlastimil Babka int alloc_flags, int classzone_idx) 147156de7263SMel Gorman { 1472e64c5237SShaohua Li unsigned long ret; 147356de7263SMel Gorman struct compact_control cc = { 147456de7263SMel Gorman .nr_freepages = 0, 147556de7263SMel Gorman .nr_migratepages = 0, 147656de7263SMel Gorman .order = order, 14776d7ce559SDavid Rientjes .gfp_mask = gfp_mask, 147856de7263SMel Gorman .zone = zone, 1479e0b9daebSDavid Rientjes .mode = mode, 1480ebff3980SVlastimil Babka .alloc_flags = alloc_flags, 1481ebff3980SVlastimil Babka .classzone_idx = classzone_idx, 148256de7263SMel Gorman }; 148356de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages); 148456de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages); 148556de7263SMel Gorman 1486e64c5237SShaohua Li ret = compact_zone(zone, &cc); 1487e64c5237SShaohua Li 1488e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages)); 1489e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages)); 1490e64c5237SShaohua Li 1491e64c5237SShaohua Li *contended = cc.contended; 1492e64c5237SShaohua Li return ret; 149356de7263SMel Gorman } 149456de7263SMel Gorman 14955e771905SMel Gorman int sysctl_extfrag_threshold = 500; 14965e771905SMel Gorman 149756de7263SMel Gorman /** 149856de7263SMel Gorman * try_to_compact_pages - Direct compact to satisfy a high-order allocation 149956de7263SMel Gorman * @gfp_mask: The GFP mask of the current allocation 15001a6d53a1SVlastimil Babka * @order: The order of the current allocation 15011a6d53a1SVlastimil Babka * @alloc_flags: The allocation flags of the current allocation 15021a6d53a1SVlastimil Babka * @ac: The context of current allocation 1503e0b9daebSDavid Rientjes * @mode: The migration mode for async, sync light, or sync migration 15041f9efdefSVlastimil Babka * @contended: Return value that determines if compaction was aborted due to 15051f9efdefSVlastimil Babka * need_resched() or lock contention 150656de7263SMel Gorman * 150756de7263SMel Gorman * This is the main entry point for direct page compaction. 
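 *
 * A rough sketch of the call site (the real caller lives in the page
 * allocator's slow path in mm/page_alloc.c and carries more
 * bookkeeping; the names below are illustrative):
 *
 *	compact_result = try_to_compact_pages(gfp_mask, order,
 *				alloc_flags, ac, migration_mode,
 *				&contended_compaction);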
150856de7263SMel Gorman */ 15091a6d53a1SVlastimil Babka unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 15101a6d53a1SVlastimil Babka int alloc_flags, const struct alloc_context *ac, 15111a6d53a1SVlastimil Babka enum migrate_mode mode, int *contended) 151256de7263SMel Gorman { 151356de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS; 151456de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO; 151556de7263SMel Gorman struct zoneref *z; 151656de7263SMel Gorman struct zone *zone; 151753853e2dSVlastimil Babka int rc = COMPACT_DEFERRED; 15181f9efdefSVlastimil Babka int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ 15191f9efdefSVlastimil Babka 15201f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_NONE; 152156de7263SMel Gorman 15224ffb6335SMel Gorman /* Check if the GFP flags allow compaction */ 1523c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io) 152453853e2dSVlastimil Babka return COMPACT_SKIPPED; 152556de7263SMel Gorman 1526837d026dSJoonsoo Kim trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1527837d026dSJoonsoo Kim 152856de7263SMel Gorman /* Compact each zone in the list */ 15291a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 15301a6d53a1SVlastimil Babka ac->nodemask) { 153156de7263SMel Gorman int status; 15321f9efdefSVlastimil Babka int zone_contended; 153356de7263SMel Gorman 153453853e2dSVlastimil Babka if (compaction_deferred(zone, order)) 153553853e2dSVlastimil Babka continue; 153653853e2dSVlastimil Babka 1537e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode, 15381a6d53a1SVlastimil Babka &zone_contended, alloc_flags, 15391a6d53a1SVlastimil Babka ac->classzone_idx); 154056de7263SMel Gorman rc = max(status, rc); 15411f9efdefSVlastimil Babka /* 15421f9efdefSVlastimil Babka * It takes at least one zone that wasn't lock contended 15431f9efdefSVlastimil Babka * to clear all_zones_contended. 15441f9efdefSVlastimil Babka */ 15451f9efdefSVlastimil Babka all_zones_contended &= zone_contended; 154656de7263SMel Gorman 15473e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */ 1548ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 15491a6d53a1SVlastimil Babka ac->classzone_idx, alloc_flags)) { 155053853e2dSVlastimil Babka /* 155153853e2dSVlastimil Babka * We think the allocation will succeed in this zone, 155253853e2dSVlastimil Babka * but it is not certain, hence the false. The caller 155353853e2dSVlastimil Babka * will repeat this with true if allocation indeed 155453853e2dSVlastimil Babka * succeeds in this zone. 155553853e2dSVlastimil Babka */ 155653853e2dSVlastimil Babka compaction_defer_reset(zone, order, false); 15571f9efdefSVlastimil Babka /* 15581f9efdefSVlastimil Babka * It is possible that async compaction aborted due to 15591f9efdefSVlastimil Babka * need_resched() and the watermarks were ok thanks to 15601f9efdefSVlastimil Babka * somebody else freeing memory. The allocation can 15611f9efdefSVlastimil Babka * however still fail so we better signal the 15621f9efdefSVlastimil Babka * need_resched() contention anyway (this will not 15631f9efdefSVlastimil Babka * prevent the allocation attempt). 
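 *
 * For reference, the contention values used here come from
 * mm/internal.h, roughly:
 *
 *	enum compact_contended {
 *		COMPACT_CONTENDED_NONE = 0,
 *		COMPACT_CONTENDED_SCHED,
 *		COMPACT_CONTENDED_LOCK,
 *	};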
15641f9efdefSVlastimil Babka */ 15651f9efdefSVlastimil Babka if (zone_contended == COMPACT_CONTENDED_SCHED) 15661f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 15671f9efdefSVlastimil Babka 15681f9efdefSVlastimil Babka goto break_loop; 15691f9efdefSVlastimil Babka } 15701f9efdefSVlastimil Babka 1571f8669795SVlastimil Babka if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) { 157253853e2dSVlastimil Babka /* 157353853e2dSVlastimil Babka * We think that allocation won't succeed in this zone 157453853e2dSVlastimil Babka * so we defer compaction there. If it ends up 157553853e2dSVlastimil Babka * succeeding after all, it will be reset. 157653853e2dSVlastimil Babka */ 157753853e2dSVlastimil Babka defer_compaction(zone, order); 157853853e2dSVlastimil Babka } 15791f9efdefSVlastimil Babka 15801f9efdefSVlastimil Babka /* 15811f9efdefSVlastimil Babka * We might have stopped compacting due to need_resched() in 15821f9efdefSVlastimil Babka * async compaction, or due to a fatal signal detected. In that 15831f9efdefSVlastimil Babka * case do not try further zones and signal need_resched() 15841f9efdefSVlastimil Babka * contention. 15851f9efdefSVlastimil Babka */ 15861f9efdefSVlastimil Babka if ((zone_contended == COMPACT_CONTENDED_SCHED) 15871f9efdefSVlastimil Babka || fatal_signal_pending(current)) { 15881f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED; 15891f9efdefSVlastimil Babka goto break_loop; 159056de7263SMel Gorman } 159156de7263SMel Gorman 15921f9efdefSVlastimil Babka continue; 15931f9efdefSVlastimil Babka break_loop: 15941f9efdefSVlastimil Babka /* 15951f9efdefSVlastimil Babka * We might not have tried all the zones, so be conservative 15961f9efdefSVlastimil Babka * and assume they are not all lock contended. 15971f9efdefSVlastimil Babka */ 15981f9efdefSVlastimil Babka all_zones_contended = 0; 15991f9efdefSVlastimil Babka break; 16001f9efdefSVlastimil Babka } 16011f9efdefSVlastimil Babka 16021f9efdefSVlastimil Babka /* 16031f9efdefSVlastimil Babka * If at least one zone wasn't deferred or skipped, we report if all 16041f9efdefSVlastimil Babka * zones that were tried were lock contended. 16051f9efdefSVlastimil Babka */ 16061f9efdefSVlastimil Babka if (rc > COMPACT_SKIPPED && all_zones_contended) 16071f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_LOCK; 16081f9efdefSVlastimil Babka 160956de7263SMel Gorman return rc; 161056de7263SMel Gorman } 161156de7263SMel Gorman 161256de7263SMel Gorman 161376ab0f53SMel Gorman /* Compact all zones within a node */ 16147103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) 161576ab0f53SMel Gorman { 161676ab0f53SMel Gorman int zoneid; 161776ab0f53SMel Gorman struct zone *zone; 161876ab0f53SMel Gorman 161976ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 162076ab0f53SMel Gorman 162176ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid]; 162276ab0f53SMel Gorman if (!populated_zone(zone)) 162376ab0f53SMel Gorman continue; 162476ab0f53SMel Gorman 16257be62de9SRik van Riel cc->nr_freepages = 0; 16267be62de9SRik van Riel cc->nr_migratepages = 0; 16277be62de9SRik van Riel cc->zone = zone; 16287be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages); 16297be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages); 163076ab0f53SMel Gorman 1631195b0c60SGioh Kim /* 1632195b0c60SGioh Kim * When called via /proc/sys/vm/compact_memory 1633195b0c60SGioh Kim * this makes sure we compact the whole zone regardless of 1634195b0c60SGioh Kim * cached scanner positions. 
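 *
 * Usage example (whole-system compaction from userspace, via the
 * sysctl handler defined later in this file):
 *
 *	# echo 1 > /proc/sys/vm/compact_memory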
1635195b0c60SGioh Kim */ 1636195b0c60SGioh Kim if (cc->order == -1) 1637195b0c60SGioh Kim __reset_isolation_suitable(zone); 1638195b0c60SGioh Kim 1639aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order)) 16407be62de9SRik van Riel compact_zone(zone, cc); 164176ab0f53SMel Gorman 1642aff62249SRik van Riel if (cc->order > 0) { 1643de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order, 1644de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0)) 1645de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false); 1646aff62249SRik van Riel } 1647aff62249SRik van Riel 16487be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages)); 16497be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages)); 165076ab0f53SMel Gorman } 165176ab0f53SMel Gorman } 165276ab0f53SMel Gorman 16537103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order) 16547be62de9SRik van Riel { 16557be62de9SRik van Riel struct compact_control cc = { 16567be62de9SRik van Riel .order = order, 1657e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC, 16587be62de9SRik van Riel }; 16597be62de9SRik van Riel 16603a7200afSMel Gorman if (!order) 16613a7200afSMel Gorman return; 16623a7200afSMel Gorman 16637103f16dSAndrew Morton __compact_pgdat(pgdat, &cc); 16647be62de9SRik van Riel } 16657be62de9SRik van Riel 16667103f16dSAndrew Morton static void compact_node(int nid) 16677be62de9SRik van Riel { 16687be62de9SRik van Riel struct compact_control cc = { 16697be62de9SRik van Riel .order = -1, 1670e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC, 167191ca9186SDavid Rientjes .ignore_skip_hint = true, 16727be62de9SRik van Riel }; 16737be62de9SRik van Riel 16747103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc); 16757be62de9SRik van Riel } 16767be62de9SRik van Riel 167776ab0f53SMel Gorman /* Compact all nodes in the system */ 16787964c06dSJason Liu static void compact_nodes(void) 167976ab0f53SMel Gorman { 168076ab0f53SMel Gorman int nid; 168176ab0f53SMel Gorman 16828575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 16838575ec29SHugh Dickins lru_add_drain_all(); 16848575ec29SHugh Dickins 168576ab0f53SMel Gorman for_each_online_node(nid) 168676ab0f53SMel Gorman compact_node(nid); 168776ab0f53SMel Gorman } 168876ab0f53SMel Gorman 168976ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */ 169076ab0f53SMel Gorman int sysctl_compact_memory; 169176ab0f53SMel Gorman 169276ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */ 169376ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write, 169476ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 169576ab0f53SMel Gorman { 169676ab0f53SMel Gorman if (write) 16977964c06dSJason Liu compact_nodes(); 169876ab0f53SMel Gorman 169976ab0f53SMel Gorman return 0; 170076ab0f53SMel Gorman } 1701ed4a6d7fSMel Gorman 17025e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write, 17035e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos) 17045e771905SMel Gorman { 17055e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos); 17065e771905SMel Gorman 17075e771905SMel Gorman return 0; 17085e771905SMel Gorman } 17095e771905SMel Gorman 1710ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 171174e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev, 171210fbcf4cSKay Sievers struct device_attribute *attr, 1713ed4a6d7fSMel Gorman const char *buf, 
size_t count) 1714ed4a6d7fSMel Gorman { 17158575ec29SHugh Dickins int nid = dev->id; 17168575ec29SHugh Dickins 17178575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 17188575ec29SHugh Dickins /* Flush pending updates to the LRU lists */ 17198575ec29SHugh Dickins lru_add_drain_all(); 17208575ec29SHugh Dickins 17218575ec29SHugh Dickins compact_node(nid); 17228575ec29SHugh Dickins } 1723ed4a6d7fSMel Gorman 1724ed4a6d7fSMel Gorman return count; 1725ed4a6d7fSMel Gorman } 172610fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1727ed4a6d7fSMel Gorman 1728ed4a6d7fSMel Gorman int compaction_register_node(struct node *node) 1729ed4a6d7fSMel Gorman { 173010fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact); 1731ed4a6d7fSMel Gorman } 1732ed4a6d7fSMel Gorman 1733ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node) 1734ed4a6d7fSMel Gorman { 173510fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact); 1736ed4a6d7fSMel Gorman } 1737ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1738ff9543fdSMichal Nazarewicz 1739ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */ 1740