/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#ifdef CONFIG_TRACEPOINTS
static const char *const compaction_status_string[] = {
	"deferred",
	"skipped",
	"continue",
	"partial",
	"complete",
	"no_suitable_page",
	"not_suitable_zone",
};
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}
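
/*
 * Illustrative mapping for migrate_async_suitable() (a sketch, assuming
 * the usual migratetype enum; is_migrate_cma() is always false unless
 * CONFIG_CMA is enabled):
 *
 *	MIGRATE_MOVABLE     -> true
 *	MIGRATE_CMA         -> true
 *	MIGRATE_UNMOVABLE   -> false
 *	MIGRATE_RECLAIMABLE -> false
 */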

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only the pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}
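
/*
 * Worked example for the boundary check above (a sketch, assuming x86-64
 * defaults: 4K pages and pageblock_order == 9, so pageblock_nr_pages ==
 * 512): for the pageblock starting at pfn 1024, a caller passes
 * start_pfn == 1024 and end_pfn == 1536; after end_pfn-- the function
 * checks pfn_valid() and page_zone_id() only on pfns 1024 and 1535, the
 * first and last page of the pageblock.
 */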

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}
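
/*
 * How the skip hint fits together (a summary sketch based on the helpers
 * in this file): the isolation functions below call
 * update_pageblock_skip() to set the per-pageblock bit when a fully
 * scanned block yields no isolations, isolation_suitable() then lets both
 * scanners pass over that block, and __reset_isolation_suitable() clears
 * every bit again once a full run ended and compact_blockskip_flush was
 * raised.
 */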

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */
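
/*
 * Numeric illustration of the cached positions (hypothetical zone
 * spanning pfns [0, 262144)): after a run where the migrate scanner
 * stopped at pfn 8192 and the free scanner at pfn 200704, the next
 * compaction pass restarts from compact_cached_migrate_pfn and
 * compact_cached_free_pfn instead of re-walking the roughly 70000 pfns
 * the previous run already covered at both ends of the zone; a later
 * __reset_isolation_suitable() widens the window back to the full zone.
 */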

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}
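
/*
 * Typical use of the two helpers above, mirroring the isolation loops
 * later in this file (an illustrative sketch, not a separate API; "lock"
 * stands for either zone->lock or zone->lru_lock):
 *
 *	if (!(pfn % SWAP_CLUSTER_MAX) &&
 *	    compact_unlock_should_abort(&lock, flags, &locked, cc))
 *		break;	(periodic unlock; fatal signal or async resched)
 *	...
 *	if (!locked) {
 *		locked = compact_trylock_irqsave(&lock, &flags, cc);
 *		if (!locked)
 *			break;	(async compaction backs out of contention)
 *	}
 */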

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for a
		 * valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give IRQs a chance. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}
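
/*
 * Arithmetic behind the "advance to the end of it" step above (a worked
 * example, assuming split_free_page() returns the number of order-0 pages
 * split from the buddy page, i.e. 1 << order): hitting a free order-3
 * page at blockpfn 4096 isolates 8 pages, blockpfn and cursor then jump
 * by 7, and the loop increment lands the scan on pfn 4104, the first
 * page after the former buddy page.
 */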

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * the scanning range to the correct one.
		 */
		if (pfn >= block_end_pfn) {
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}
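
/*
 * Usage sketch for isolate_freepages_range() (illustrative only; in this
 * era the CMA path in alloc_contig_range() is the strict-mode caller, and
 * the pfn range and compact_control fields shown here are hypothetical):
 *
 *	struct compact_control cc = {
 *		.zone = zone,
 *		.ignore_skip_hint = true,
 *	};
 *	INIT_LIST_HEAD(&cc.freepages);
 *	INIT_LIST_HEAD(&cc.migratepages);
 *	if (!isolate_freepages_range(&cc, start, end))
 *		return -EBUSY;	(some page in [start, end) was not free)
 */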

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
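
/*
 * Numeric illustration of the heuristic above (hypothetical zone
 * counters): with 20000 inactive, 10000 active and 16000 isolated pages,
 * 16000 > (20000 + 10000) / 2 holds, so too_many_isolated() asks the
 * caller to back off until parallel reclaimers or compactors put some
 * isolated pages back on the LRU lists.
 */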

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * more than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give IRQs a chance. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageTransHuge under lock */
			if (!PageLRU(page))
				continue;
			if (PageTransHuge(page)) {
				low_pfn += (1 << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}
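
/*
 * Skip-ahead arithmetic used in the scan loop above (worked example,
 * assuming an order-9 THP): for a compound page whose head sits at
 * low_pfn 2048, low_pfn += (1 << 9) - 1 leaves low_pfn at 2559, and the
 * for-loop increment moves the scan to 2560, the first pfn after the
 * compound page. The unlocked variant instead rounds up to the next
 * pageblock boundary, because compound_order() cannot be trusted without
 * holding the lru_lock.
 */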

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}
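
/*
 * Usage sketch (illustrative; the loop shape and error codes are
 * hypothetical): a CMA-style caller alternates this function with
 * migrate_pages() until the whole range is drained:
 *
 *	while (pfn < end) {
 *		pfn = isolate_migratepages_range(&cc, pfn, end);
 *		if (!pfn)
 *			return -EINTR;
 *		ret = migrate_pages(&cc.migratepages, ...);
 *	}
 */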

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in the last pageblock
	 * of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
				isolate_start_pfn :
				block_start_pfn - pageblock_nr_pages;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}
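
/*
 * Direction of the two scanners (summary sketch of the loop above,
 * assuming pageblock_nr_pages == 512): the free scanner walks pageblock
 * by pageblock towards the zone start, so with cc->free_pfn == 100352 it
 * visits block starts 100352, 99840, 99328, ... while low_pfn, derived
 * from the migrate scanner, rises to meet it; the scanners crossing is
 * what terminates a compaction run.
 */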

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}
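
/*
 * How the two callbacks above are wired up (a sketch; the actual call
 * lives in compact_zone(), shown here under the migrate_pages() signature
 * of this era):
 *
 *	err = migrate_pages(&cc->migratepages, compaction_alloc,
 *			compaction_free, (unsigned long)cc, cc->mode,
 *			MR_COMPACTION);
 */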

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/*
	 * Record where migration scanner will be restarted. If we end up in
	 * the same pageblock as the free scanner, make the scanners fully
	 * meet so that compact_finished() terminates compaction.
	 */
	cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}
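
/*
 * Consumption of isolate_migrate_t (a sketch of the caller's handling in
 * compact_zone(), reconstructed from the enum comments above):
 *
 *	switch (isolate_migratepages(zone, cc)) {
 *	case ISOLATE_ABORT:	give up, put isolated pages back
 *	case ISOLATE_NONE:	nothing isolated, try the next pageblock
 *	case ISOLATE_SUCCESS:	hand cc->migratepages to migrate_pages()
 *	}
 */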

static int __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;
109756de7263SMel Gorman for (order = cc->order; order < MAX_ORDER; order++) {
10988fb74b9fSMel Gorman struct free_area *area = &zone->free_area[order];
10998fb74b9fSMel Gorman 
110056de7263SMel Gorman /* Job done if page is free of the right migratetype */
11016d7ce559SDavid Rientjes if (!list_empty(&area->free_list[migratetype]))
110256de7263SMel Gorman return COMPACT_PARTIAL;
110356de7263SMel Gorman 
110456de7263SMel Gorman /* Job done if allocation would set block type */
11051fb3f8caSMel Gorman if (cc->order >= pageblock_order && area->nr_free)
110656de7263SMel Gorman return COMPACT_PARTIAL;
110756de7263SMel Gorman }
110856de7263SMel Gorman 
1109*837d026dSJoonsoo Kim return COMPACT_NO_SUITABLE_PAGE;
1110*837d026dSJoonsoo Kim }
1111*837d026dSJoonsoo Kim 
1112*837d026dSJoonsoo Kim static int compact_finished(struct zone *zone, struct compact_control *cc,
1113*837d026dSJoonsoo Kim const int migratetype)
1114*837d026dSJoonsoo Kim {
1115*837d026dSJoonsoo Kim int ret;
1116*837d026dSJoonsoo Kim 
1117*837d026dSJoonsoo Kim ret = __compact_finished(zone, cc, migratetype);
1118*837d026dSJoonsoo Kim trace_mm_compaction_finished(zone, cc->order, ret);
1119*837d026dSJoonsoo Kim if (ret == COMPACT_NO_SUITABLE_PAGE)
1120*837d026dSJoonsoo Kim ret = COMPACT_CONTINUE;
1121*837d026dSJoonsoo Kim 
1122*837d026dSJoonsoo Kim return ret;
1123748446bbSMel Gorman }
1124748446bbSMel Gorman 
11253e7d3449SMel Gorman /*
11263e7d3449SMel Gorman  * compaction_suitable: Is this suitable to run compaction on this zone now?
11273e7d3449SMel Gorman  * Returns
11283e7d3449SMel Gorman  * COMPACT_SKIPPED - If there are too few free pages for compaction
11293e7d3449SMel Gorman  * COMPACT_PARTIAL - If the allocation would succeed without compaction
11303e7d3449SMel Gorman  * COMPACT_CONTINUE - If compaction should run now
11313e7d3449SMel Gorman  */
1132*837d026dSJoonsoo Kim static unsigned long __compaction_suitable(struct zone *zone, int order,
1133ebff3980SVlastimil Babka int alloc_flags, int classzone_idx)
11343e7d3449SMel Gorman {
11353e7d3449SMel Gorman int fragindex;
11363e7d3449SMel Gorman unsigned long watermark;
11373e7d3449SMel Gorman 
11383e7d3449SMel Gorman /*
11393957c776SMichal Hocko  * order == -1 is expected when compacting via
11403957c776SMichal Hocko  * /proc/sys/vm/compact_memory
11413957c776SMichal Hocko  */
11423957c776SMichal Hocko if (order == -1)
11433957c776SMichal Hocko return COMPACT_CONTINUE;
11443957c776SMichal Hocko 
1145ebff3980SVlastimil Babka watermark = low_wmark_pages(zone);
1146ebff3980SVlastimil Babka /*
1147ebff3980SVlastimil Babka  * If watermarks for high-order allocation are already met, there
1148ebff3980SVlastimil Babka  * should be no need for compaction at all.
1149ebff3980SVlastimil Babka  */
1150ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, watermark, classzone_idx,
1151ebff3980SVlastimil Babka alloc_flags))
1152ebff3980SVlastimil Babka return COMPACT_PARTIAL;
1153ebff3980SVlastimil Babka 
11543957c776SMichal Hocko /*
11553e7d3449SMel Gorman  * Watermarks for order-0 must be met for compaction. Note the 2UL.
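 * (2UL << order is twice the size of the requested allocation, in pages.)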
11563e7d3449SMel Gorman  * This is because during migration, copies of pages need to be
11573e7d3449SMel Gorman  * allocated, so for a short time the footprint is higher.
11583e7d3449SMel Gorman  */
1159ebff3980SVlastimil Babka watermark += (2UL << order);
1160ebff3980SVlastimil Babka if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
11613e7d3449SMel Gorman return COMPACT_SKIPPED;
11623e7d3449SMel Gorman 
11633e7d3449SMel Gorman /*
11643e7d3449SMel Gorman  * The fragmentation index determines if allocation failures are due to
11653e7d3449SMel Gorman  * low memory or external fragmentation.
11663e7d3449SMel Gorman  *
1167ebff3980SVlastimil Babka  * An index of -1000 would imply allocations might succeed depending on
1168ebff3980SVlastimil Babka  * watermarks, but we already failed the high-order watermark check.
11693e7d3449SMel Gorman  * An index towards 0 implies failure is due to lack of memory.
11703e7d3449SMel Gorman  * An index towards 1000 implies failure is due to fragmentation.
11713e7d3449SMel Gorman  *
11723e7d3449SMel Gorman  * Only compact if a failure would be due to fragmentation.
11733e7d3449SMel Gorman  */
11743e7d3449SMel Gorman fragindex = fragmentation_index(zone, order);
11753e7d3449SMel Gorman if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
1176*837d026dSJoonsoo Kim return COMPACT_NOT_SUITABLE_ZONE;
11773e7d3449SMel Gorman 
11783e7d3449SMel Gorman return COMPACT_CONTINUE;
11793e7d3449SMel Gorman }
11803e7d3449SMel Gorman 
1181*837d026dSJoonsoo Kim unsigned long compaction_suitable(struct zone *zone, int order,
1182*837d026dSJoonsoo Kim int alloc_flags, int classzone_idx)
1183*837d026dSJoonsoo Kim {
1184*837d026dSJoonsoo Kim unsigned long ret;
1185*837d026dSJoonsoo Kim 
1186*837d026dSJoonsoo Kim ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
1187*837d026dSJoonsoo Kim trace_mm_compaction_suitable(zone, order, ret);
1188*837d026dSJoonsoo Kim if (ret == COMPACT_NOT_SUITABLE_ZONE)
1189*837d026dSJoonsoo Kim ret = COMPACT_SKIPPED;
1190*837d026dSJoonsoo Kim 
1191*837d026dSJoonsoo Kim return ret;
1192*837d026dSJoonsoo Kim }
1193*837d026dSJoonsoo Kim 
1194748446bbSMel Gorman static int compact_zone(struct zone *zone, struct compact_control *cc)
1195748446bbSMel Gorman {
1196748446bbSMel Gorman int ret;
1197c89511abSMel Gorman unsigned long start_pfn = zone->zone_start_pfn;
1198108bcc96SCody P Schafer unsigned long end_pfn = zone_end_pfn(zone);
11996d7ce559SDavid Rientjes const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
1200e0b9daebSDavid Rientjes const bool sync = cc->mode != MIGRATE_ASYNC;
1201fdaf7f5cSVlastimil Babka unsigned long last_migrated_pfn = 0;
1202748446bbSMel Gorman 
1203ebff3980SVlastimil Babka ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
1204ebff3980SVlastimil Babka cc->classzone_idx);
12053e7d3449SMel Gorman switch (ret) {
12063e7d3449SMel Gorman case COMPACT_PARTIAL:
12073e7d3449SMel Gorman case COMPACT_SKIPPED:
12083e7d3449SMel Gorman /* Compaction is likely to fail */
12093e7d3449SMel Gorman return ret;
12103e7d3449SMel Gorman case COMPACT_CONTINUE:
12113e7d3449SMel Gorman /* Fall through to compaction */
12123e7d3449SMel Gorman ;
12133e7d3449SMel Gorman }
12143e7d3449SMel Gorman 
1215c89511abSMel Gorman /*
1216d3132e4bSVlastimil Babka  * Clear pageblock skip if there were failures recently and compaction
1217d3132e4bSVlastimil Babka  * is about to be retried after being deferred. kswapd does not do
1218d3132e4bSVlastimil Babka  * this reset as it'll reset the cached information when going to sleep.
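 * (The assumption here is that compaction_restarting() only reports
 * true once the deferral backoff has fully expired, which is exactly
 * when stale skip hints are most likely to hide pageblocks that have
 * become usable in the meantime.)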
1219d3132e4bSVlastimil Babka  */
1220d3132e4bSVlastimil Babka if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
1221d3132e4bSVlastimil Babka __reset_isolation_suitable(zone);
1222d3132e4bSVlastimil Babka 
1223d3132e4bSVlastimil Babka /*
1224c89511abSMel Gorman  * Set up to move all movable pages to the end of the zone. Use cached
1225c89511abSMel Gorman  * information on where the scanners should start, but check that it
1226c89511abSMel Gorman  * is initialised by ensuring the values are within zone boundaries.
1227c89511abSMel Gorman  */
1228e0b9daebSDavid Rientjes cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
1229c89511abSMel Gorman cc->free_pfn = zone->compact_cached_free_pfn;
1230c89511abSMel Gorman if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
1231c89511abSMel Gorman cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
1232c89511abSMel Gorman zone->compact_cached_free_pfn = cc->free_pfn;
1233c89511abSMel Gorman }
1234c89511abSMel Gorman if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
1235c89511abSMel Gorman cc->migrate_pfn = start_pfn;
123635979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
123735979ef3SDavid Rientjes zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
1238c89511abSMel Gorman }
1239748446bbSMel Gorman 
124016c4a097SJoonsoo Kim trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
124116c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync);
12420eb927c0SMel Gorman 
1243748446bbSMel Gorman migrate_prep_local();
1244748446bbSMel Gorman 
12456d7ce559SDavid Rientjes while ((ret = compact_finished(zone, cc, migratetype)) ==
12466d7ce559SDavid Rientjes COMPACT_CONTINUE) {
12479d502c1cSMinchan Kim int err;
1248fdaf7f5cSVlastimil Babka unsigned long isolate_start_pfn = cc->migrate_pfn;
1249748446bbSMel Gorman 
1250f9e35b3bSMel Gorman switch (isolate_migratepages(zone, cc)) {
1251f9e35b3bSMel Gorman case ISOLATE_ABORT:
1252f9e35b3bSMel Gorman ret = COMPACT_PARTIAL;
12535733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages);
1254e64c5237SShaohua Li cc->nr_migratepages = 0;
1255f9e35b3bSMel Gorman goto out;
1256f9e35b3bSMel Gorman case ISOLATE_NONE:
1257fdaf7f5cSVlastimil Babka /*
1258fdaf7f5cSVlastimil Babka  * We haven't isolated and migrated anything, but
1259fdaf7f5cSVlastimil Babka  * there might still be unflushed migrations from
1260fdaf7f5cSVlastimil Babka  * the previous cc->order aligned block.
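 * Jumping to check_drain below still lets those freed pages
 * reach the buddy allocator even though this iteration
 * isolated nothing new.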
1261fdaf7f5cSVlastimil Babka  */
1262fdaf7f5cSVlastimil Babka goto check_drain;
1263f9e35b3bSMel Gorman case ISOLATE_SUCCESS:
1264f9e35b3bSMel Gorman ;
1265f9e35b3bSMel Gorman }
1266748446bbSMel Gorman 
1267d53aea3dSDavid Rientjes err = migrate_pages(&cc->migratepages, compaction_alloc,
1268e0b9daebSDavid Rientjes compaction_free, (unsigned long)cc, cc->mode,
12697b2a2d4aSMel Gorman MR_COMPACTION);
1270748446bbSMel Gorman 
1271f8c9301fSVlastimil Babka trace_mm_compaction_migratepages(cc->nr_migratepages, err,
1272f8c9301fSVlastimil Babka &cc->migratepages);
1273748446bbSMel Gorman 
1274f8c9301fSVlastimil Babka /* All pages were either migrated or will be released */
1275f8c9301fSVlastimil Babka cc->nr_migratepages = 0;
12769d502c1cSMinchan Kim if (err) {
12775733c7d1SRafael Aquini putback_movable_pages(&cc->migratepages);
12787ed695e0SVlastimil Babka /*
12797ed695e0SVlastimil Babka  * migrate_pages() may return -ENOMEM when the scanners meet,
12807ed695e0SVlastimil Babka  * and we want compact_finished() to detect it.
12817ed695e0SVlastimil Babka  */
12827ed695e0SVlastimil Babka if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
12834bf2bba3SDavid Rientjes ret = COMPACT_PARTIAL;
12844bf2bba3SDavid Rientjes goto out;
1285748446bbSMel Gorman }
12864bf2bba3SDavid Rientjes }
1287fdaf7f5cSVlastimil Babka 
1288fdaf7f5cSVlastimil Babka /*
1289fdaf7f5cSVlastimil Babka  * Record where we could have freed pages by migration and not
1290fdaf7f5cSVlastimil Babka  * yet flushed them to the buddy allocator. We use the pfn that
1291fdaf7f5cSVlastimil Babka  * isolate_migratepages() started from in this loop iteration
1292fdaf7f5cSVlastimil Babka  * - this is the lowest page that could have been isolated and
1293fdaf7f5cSVlastimil Babka  * then freed by migration.
1294fdaf7f5cSVlastimil Babka  */
1295fdaf7f5cSVlastimil Babka if (!last_migrated_pfn)
1296fdaf7f5cSVlastimil Babka last_migrated_pfn = isolate_start_pfn;
1297fdaf7f5cSVlastimil Babka 
1298fdaf7f5cSVlastimil Babka check_drain:
1299fdaf7f5cSVlastimil Babka /*
1300fdaf7f5cSVlastimil Babka  * Has the migration scanner moved away from the previous
1301fdaf7f5cSVlastimil Babka  * cc->order aligned block where we migrated from? If yes,
1302fdaf7f5cSVlastimil Babka  * flush the pages that were freed, so that they can merge and
1303fdaf7f5cSVlastimil Babka  * compact_finished() can detect immediately if allocation
1304fdaf7f5cSVlastimil Babka  * would succeed.
1305fdaf7f5cSVlastimil Babka  */
1306fdaf7f5cSVlastimil Babka if (cc->order > 0 && last_migrated_pfn) {
1307fdaf7f5cSVlastimil Babka int cpu;
1308fdaf7f5cSVlastimil Babka unsigned long current_block_start =
1309fdaf7f5cSVlastimil Babka cc->migrate_pfn & ~((1UL << cc->order) - 1);
1310fdaf7f5cSVlastimil Babka 
1311fdaf7f5cSVlastimil Babka if (last_migrated_pfn < current_block_start) {
1312fdaf7f5cSVlastimil Babka cpu = get_cpu();
1313fdaf7f5cSVlastimil Babka lru_add_drain_cpu(cpu);
1314fdaf7f5cSVlastimil Babka drain_local_pages(zone);
1315fdaf7f5cSVlastimil Babka put_cpu();
1316fdaf7f5cSVlastimil Babka /* No more flushing until we migrate again */
1317fdaf7f5cSVlastimil Babka last_migrated_pfn = 0;
1318fdaf7f5cSVlastimil Babka }
1319fdaf7f5cSVlastimil Babka }
1320fdaf7f5cSVlastimil Babka 
1321748446bbSMel Gorman }
1322748446bbSMel Gorman 
1323f9e35b3bSMel Gorman out:
13246bace090SVlastimil Babka /*
13256bace090SVlastimil Babka  * Release free pages and update where the free scanner should restart,
13266bace090SVlastimil Babka  * so we don't leave any returned pages behind in the next attempt.
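 * (release_freepages() returns the highest pfn it freed; it is
 * rounded down below because the cached pfn must point at the
 * first page of a pageblock.)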
13276bace090SVlastimil Babka  */
13286bace090SVlastimil Babka if (cc->nr_freepages > 0) {
13296bace090SVlastimil Babka unsigned long free_pfn = release_freepages(&cc->freepages);
13306bace090SVlastimil Babka 
13316bace090SVlastimil Babka cc->nr_freepages = 0;
13326bace090SVlastimil Babka VM_BUG_ON(free_pfn == 0);
13336bace090SVlastimil Babka /* The cached pfn is always the first in a pageblock */
13346bace090SVlastimil Babka free_pfn &= ~(pageblock_nr_pages-1);
13356bace090SVlastimil Babka /*
13366bace090SVlastimil Babka  * Only go back, not forward. The cached pfn might already have
13376bace090SVlastimil Babka  * been reset to the zone end in compact_finished().
13386bace090SVlastimil Babka  */
13396bace090SVlastimil Babka if (free_pfn > zone->compact_cached_free_pfn)
13406bace090SVlastimil Babka zone->compact_cached_free_pfn = free_pfn;
13416bace090SVlastimil Babka }
1342748446bbSMel Gorman 
134316c4a097SJoonsoo Kim trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
134416c4a097SJoonsoo Kim cc->free_pfn, end_pfn, sync, ret);
13450eb927c0SMel Gorman 
1346748446bbSMel Gorman return ret;
1347748446bbSMel Gorman }
134876ab0f53SMel Gorman 
1349e0b9daebSDavid Rientjes static unsigned long compact_zone_order(struct zone *zone, int order,
1350ebff3980SVlastimil Babka gfp_t gfp_mask, enum migrate_mode mode, int *contended,
1351ebff3980SVlastimil Babka int alloc_flags, int classzone_idx)
135256de7263SMel Gorman {
1353e64c5237SShaohua Li unsigned long ret;
135456de7263SMel Gorman struct compact_control cc = {
135556de7263SMel Gorman .nr_freepages = 0,
135656de7263SMel Gorman .nr_migratepages = 0,
135756de7263SMel Gorman .order = order,
13586d7ce559SDavid Rientjes .gfp_mask = gfp_mask,
135956de7263SMel Gorman .zone = zone,
1360e0b9daebSDavid Rientjes .mode = mode,
1361ebff3980SVlastimil Babka .alloc_flags = alloc_flags,
1362ebff3980SVlastimil Babka .classzone_idx = classzone_idx,
136356de7263SMel Gorman };
136456de7263SMel Gorman INIT_LIST_HEAD(&cc.freepages);
136556de7263SMel Gorman INIT_LIST_HEAD(&cc.migratepages);
136656de7263SMel Gorman 
1367e64c5237SShaohua Li ret = compact_zone(zone, &cc);
1368e64c5237SShaohua Li 
1369e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.freepages));
1370e64c5237SShaohua Li VM_BUG_ON(!list_empty(&cc.migratepages));
1371e64c5237SShaohua Li 
1372e64c5237SShaohua Li *contended = cc.contended;
1373e64c5237SShaohua Li return ret;
137456de7263SMel Gorman }
137556de7263SMel Gorman 
13765e771905SMel Gorman int sysctl_extfrag_threshold = 500;
13775e771905SMel Gorman 
137856de7263SMel Gorman /**
137956de7263SMel Gorman  * try_to_compact_pages - Direct compact to satisfy a high-order allocation
138056de7263SMel Gorman  * @gfp_mask: The GFP mask of the current allocation
13811a6d53a1SVlastimil Babka  * @order: The order of the current allocation
13821a6d53a1SVlastimil Babka  * @alloc_flags: The allocation flags of the current allocation
13831a6d53a1SVlastimil Babka  * @ac: The context of the current allocation
1384e0b9daebSDavid Rientjes  * @mode: The migration mode for async, sync light, or sync migration
13851f9efdefSVlastimil Babka  * @contended: Return value that determines if compaction was aborted due to
13861f9efdefSVlastimil Babka  * need_resched() or lock contention
138756de7263SMel Gorman  *
138856de7263SMel Gorman  * This is the main entry point for direct page compaction.
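 * The return value is the most advanced compaction status reached in any
 * zone tried (rc accumulates via max(status, rc) below).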
138956de7263SMel Gorman  */
13901a6d53a1SVlastimil Babka unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
13911a6d53a1SVlastimil Babka int alloc_flags, const struct alloc_context *ac,
13921a6d53a1SVlastimil Babka enum migrate_mode mode, int *contended)
139356de7263SMel Gorman {
139456de7263SMel Gorman int may_enter_fs = gfp_mask & __GFP_FS;
139556de7263SMel Gorman int may_perform_io = gfp_mask & __GFP_IO;
139656de7263SMel Gorman struct zoneref *z;
139756de7263SMel Gorman struct zone *zone;
139853853e2dSVlastimil Babka int rc = COMPACT_DEFERRED;
13991f9efdefSVlastimil Babka int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
14001f9efdefSVlastimil Babka 
14011f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_NONE;
140256de7263SMel Gorman 
14034ffb6335SMel Gorman /* Check if the GFP flags allow compaction */
1404c5a73c3dSAndrea Arcangeli if (!order || !may_enter_fs || !may_perform_io)
140553853e2dSVlastimil Babka return COMPACT_SKIPPED;
140656de7263SMel Gorman 
1407*837d026dSJoonsoo Kim trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);
1408*837d026dSJoonsoo Kim 
140956de7263SMel Gorman /* Compact each zone in the list */
14101a6d53a1SVlastimil Babka for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
14111a6d53a1SVlastimil Babka ac->nodemask) {
141256de7263SMel Gorman int status;
14131f9efdefSVlastimil Babka int zone_contended;
141456de7263SMel Gorman 
141553853e2dSVlastimil Babka if (compaction_deferred(zone, order))
141653853e2dSVlastimil Babka continue;
141753853e2dSVlastimil Babka 
1418e0b9daebSDavid Rientjes status = compact_zone_order(zone, order, gfp_mask, mode,
14191a6d53a1SVlastimil Babka &zone_contended, alloc_flags,
14201a6d53a1SVlastimil Babka ac->classzone_idx);
142156de7263SMel Gorman rc = max(status, rc);
14221f9efdefSVlastimil Babka /*
14231f9efdefSVlastimil Babka  * It takes at least one zone that wasn't lock contended
14241f9efdefSVlastimil Babka  * to clear all_zones_contended.
14251f9efdefSVlastimil Babka  */
14261f9efdefSVlastimil Babka all_zones_contended &= zone_contended;
142756de7263SMel Gorman 
14283e7d3449SMel Gorman /* If a normal allocation would succeed, stop compacting */
1429ebff3980SVlastimil Babka if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
14301a6d53a1SVlastimil Babka ac->classzone_idx, alloc_flags)) {
143153853e2dSVlastimil Babka /*
143253853e2dSVlastimil Babka  * We think the allocation will succeed in this zone,
143353853e2dSVlastimil Babka  * but it is not certain, hence the false. The caller
143453853e2dSVlastimil Babka  * will repeat this with true if allocation indeed
143553853e2dSVlastimil Babka  * succeeds in this zone.
143653853e2dSVlastimil Babka  */
143753853e2dSVlastimil Babka compaction_defer_reset(zone, order, false);
14381f9efdefSVlastimil Babka /*
14391f9efdefSVlastimil Babka  * It is possible that async compaction aborted due to
14401f9efdefSVlastimil Babka  * need_resched() and the watermarks were ok thanks to
14411f9efdefSVlastimil Babka  * somebody else freeing memory. The allocation can
14421f9efdefSVlastimil Babka  * however still fail so we better signal the
14431f9efdefSVlastimil Babka  * need_resched() contention anyway (this will not
14441f9efdefSVlastimil Babka  * prevent the allocation attempt).
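 * (COMPACT_CONTENDED_SCHED rather than _LOCK is reported here because
 * the abort, if any, came from need_resched(), not lock contention.)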
14451f9efdefSVlastimil Babka  */
14461f9efdefSVlastimil Babka if (zone_contended == COMPACT_CONTENDED_SCHED)
14471f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED;
14481f9efdefSVlastimil Babka 
14491f9efdefSVlastimil Babka goto break_loop;
14501f9efdefSVlastimil Babka }
14511f9efdefSVlastimil Babka 
1452f8669795SVlastimil Babka if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
145353853e2dSVlastimil Babka /*
145353853e2dSVlastimil Babka  * We think that allocation won't succeed in this zone
145453853e2dSVlastimil Babka  * so we defer compaction there. If it ends up
145553853e2dSVlastimil Babka  * succeeding after all, it will be reset.
145653853e2dSVlastimil Babka  */
145853853e2dSVlastimil Babka defer_compaction(zone, order);
145953853e2dSVlastimil Babka }
14601f9efdefSVlastimil Babka 
14611f9efdefSVlastimil Babka /*
14621f9efdefSVlastimil Babka  * We might have stopped compacting due to need_resched() in
14631f9efdefSVlastimil Babka  * async compaction, or because a fatal signal was detected.
14641f9efdefSVlastimil Babka  * In that case do not try further zones and signal
14651f9efdefSVlastimil Babka  * need_resched() contention.
14661f9efdefSVlastimil Babka  */
14671f9efdefSVlastimil Babka if ((zone_contended == COMPACT_CONTENDED_SCHED)
14681f9efdefSVlastimil Babka || fatal_signal_pending(current)) {
14691f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_SCHED;
14701f9efdefSVlastimil Babka goto break_loop;
147156de7263SMel Gorman }
147256de7263SMel Gorman 
14731f9efdefSVlastimil Babka continue;
14741f9efdefSVlastimil Babka break_loop:
14751f9efdefSVlastimil Babka /*
14761f9efdefSVlastimil Babka  * We might not have tried all the zones, so be conservative
14771f9efdefSVlastimil Babka  * and assume they are not all lock contended.
14781f9efdefSVlastimil Babka  */
14791f9efdefSVlastimil Babka all_zones_contended = 0;
14801f9efdefSVlastimil Babka break;
14811f9efdefSVlastimil Babka }
14821f9efdefSVlastimil Babka 
14831f9efdefSVlastimil Babka /*
14841f9efdefSVlastimil Babka  * If at least one zone wasn't deferred or skipped, we report if all
14851f9efdefSVlastimil Babka  * zones that were tried were lock contended.
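 * (rc > COMPACT_SKIPPED implies at least one zone actually ran
 * compaction, since deferred and skipped zones only produce the lower
 * status codes.)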
14861f9efdefSVlastimil Babka  */
14871f9efdefSVlastimil Babka if (rc > COMPACT_SKIPPED && all_zones_contended)
14881f9efdefSVlastimil Babka *contended = COMPACT_CONTENDED_LOCK;
14891f9efdefSVlastimil Babka 
149056de7263SMel Gorman return rc;
149156de7263SMel Gorman }
149256de7263SMel Gorman 
149356de7263SMel Gorman 
149476ab0f53SMel Gorman /* Compact all zones within a node */
14957103f16dSAndrew Morton static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
149676ab0f53SMel Gorman {
149776ab0f53SMel Gorman int zoneid;
149876ab0f53SMel Gorman struct zone *zone;
149976ab0f53SMel Gorman 
150076ab0f53SMel Gorman for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
150176ab0f53SMel Gorman 
150276ab0f53SMel Gorman zone = &pgdat->node_zones[zoneid];
150376ab0f53SMel Gorman if (!populated_zone(zone))
150476ab0f53SMel Gorman continue;
150576ab0f53SMel Gorman 
15067be62de9SRik van Riel cc->nr_freepages = 0;
15077be62de9SRik van Riel cc->nr_migratepages = 0;
15087be62de9SRik van Riel cc->zone = zone;
15097be62de9SRik van Riel INIT_LIST_HEAD(&cc->freepages);
15107be62de9SRik van Riel INIT_LIST_HEAD(&cc->migratepages);
151176ab0f53SMel Gorman 
1512aad6ec37SDan Carpenter if (cc->order == -1 || !compaction_deferred(zone, cc->order))
15137be62de9SRik van Riel compact_zone(zone, cc);
151476ab0f53SMel Gorman 
1515aff62249SRik van Riel if (cc->order > 0) {
1516de6c60a6SVlastimil Babka if (zone_watermark_ok(zone, cc->order,
1517de6c60a6SVlastimil Babka low_wmark_pages(zone), 0, 0))
1518de6c60a6SVlastimil Babka compaction_defer_reset(zone, cc->order, false);
1519aff62249SRik van Riel }
1520aff62249SRik van Riel 
15217be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->freepages));
15227be62de9SRik van Riel VM_BUG_ON(!list_empty(&cc->migratepages));
152376ab0f53SMel Gorman }
152476ab0f53SMel Gorman }
152576ab0f53SMel Gorman 
15267103f16dSAndrew Morton void compact_pgdat(pg_data_t *pgdat, int order)
15277be62de9SRik van Riel {
15287be62de9SRik van Riel struct compact_control cc = {
15297be62de9SRik van Riel .order = order,
1530e0b9daebSDavid Rientjes .mode = MIGRATE_ASYNC,
15317be62de9SRik van Riel };
15327be62de9SRik van Riel 
15333a7200afSMel Gorman if (!order)
15343a7200afSMel Gorman return;
15353a7200afSMel Gorman 
15367103f16dSAndrew Morton __compact_pgdat(pgdat, &cc);
15377be62de9SRik van Riel }
15387be62de9SRik van Riel 
15397103f16dSAndrew Morton static void compact_node(int nid)
15407be62de9SRik van Riel {
15417be62de9SRik van Riel struct compact_control cc = {
15427be62de9SRik van Riel .order = -1,
1543e0b9daebSDavid Rientjes .mode = MIGRATE_SYNC,
154491ca9186SDavid Rientjes .ignore_skip_hint = true,
15457be62de9SRik van Riel };
15467be62de9SRik van Riel 
15477103f16dSAndrew Morton __compact_pgdat(NODE_DATA(nid), &cc);
15487be62de9SRik van Riel }
15497be62de9SRik van Riel 
155076ab0f53SMel Gorman /* Compact all nodes in the system */
15517964c06dSJason Liu static void compact_nodes(void)
155276ab0f53SMel Gorman {
155376ab0f53SMel Gorman int nid;
155476ab0f53SMel Gorman 
15558575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
15568575ec29SHugh Dickins lru_add_drain_all();
15578575ec29SHugh Dickins 
155876ab0f53SMel Gorman for_each_online_node(nid)
155976ab0f53SMel Gorman compact_node(nid);
156076ab0f53SMel Gorman }
156176ab0f53SMel Gorman 
156276ab0f53SMel Gorman /* The written value is actually unused, all memory is compacted */
156376ab0f53SMel Gorman int sysctl_compact_memory;
156476ab0f53SMel Gorman 
156576ab0f53SMel Gorman /* This is the entry point for compacting all nodes via /proc/sys/vm */
156676ab0f53SMel Gorman int sysctl_compaction_handler(struct ctl_table *table, int write,
156776ab0f53SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
156876ab0f53SMel Gorman {
156976ab0f53SMel Gorman if (write)
15707964c06dSJason Liu compact_nodes();
157176ab0f53SMel Gorman 
157276ab0f53SMel Gorman return 0;
157376ab0f53SMel Gorman }
1574ed4a6d7fSMel Gorman 
15755e771905SMel Gorman int sysctl_extfrag_handler(struct ctl_table *table, int write,
15765e771905SMel Gorman void __user *buffer, size_t *length, loff_t *ppos)
15775e771905SMel Gorman {
15785e771905SMel Gorman proc_dointvec_minmax(table, write, buffer, length, ppos);
15795e771905SMel Gorman 
15805e771905SMel Gorman return 0;
15815e771905SMel Gorman }
15825e771905SMel Gorman 
1583ed4a6d7fSMel Gorman #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
158474e77fb9SRashika Kheria static ssize_t sysfs_compact_node(struct device *dev,
158510fbcf4cSKay Sievers struct device_attribute *attr,
1586ed4a6d7fSMel Gorman const char *buf, size_t count)
1587ed4a6d7fSMel Gorman {
15888575ec29SHugh Dickins int nid = dev->id;
15898575ec29SHugh Dickins 
15908575ec29SHugh Dickins if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
15918575ec29SHugh Dickins /* Flush pending updates to the LRU lists */
15928575ec29SHugh Dickins lru_add_drain_all();
15938575ec29SHugh Dickins 
15948575ec29SHugh Dickins compact_node(nid);
15958575ec29SHugh Dickins }
1596ed4a6d7fSMel Gorman 
1597ed4a6d7fSMel Gorman return count;
1598ed4a6d7fSMel Gorman }
159910fbcf4cSKay Sievers static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);
1600ed4a6d7fSMel Gorman 
1601ed4a6d7fSMel Gorman int compaction_register_node(struct node *node)
1602ed4a6d7fSMel Gorman {
160310fbcf4cSKay Sievers return device_create_file(&node->dev, &dev_attr_compact);
1604ed4a6d7fSMel Gorman }
1605ed4a6d7fSMel Gorman 
1606ed4a6d7fSMel Gorman void compaction_unregister_node(struct node *node)
1607ed4a6d7fSMel Gorman {
160810fbcf4cSKay Sievers return device_remove_file(&node->dev, &dev_attr_compact);
1609ed4a6d7fSMel Gorman }
1610ed4a6d7fSMel Gorman #endif /* CONFIG_SYSFS && CONFIG_NUMA */
1611ff9543fdSMichal Nazarewicz 
1612ff9543fdSMichal Nazarewicz #endif /* CONFIG_COMPACTION */
1613
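/*
 * Illustrative sketch (not part of the original file): how a direct
 * compaction caller in the page allocator might drive
 * try_to_compact_pages(). The control flow and the COMPACTSTALL
 * accounting shown here are assumptions for illustration; only the
 * try_to_compact_pages() signature and the relative ordering of the
 * COMPACT_* status values are taken from the code above.
 *
 *	int contended = COMPACT_CONTENDED_NONE;
 *	unsigned long rc;
 *
 *	rc = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
 *				  MIGRATE_ASYNC, &contended);
 *	if (rc > COMPACT_DEFERRED)
 *		count_vm_event(COMPACTSTALL);
 *	if (rc >= COMPACT_PARTIAL) {
 *		// Retry the high-order allocation now; on success the
 *		// caller is expected to call compaction_defer_reset()
 *		// with alloc_success == true.
 *	}
 */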