// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
/*
 * Fragmentation score check interval for proactive compaction purposes.
 */
#define HPAGE_FRAG_CHECK_INTERVAL_MSEC	(500)

static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
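
/*
 * Illustrative example (not in the original source; assumes pageblock_order
 * == 9, i.e. 512-page blocks as on x86-64 with 4K pages): for pfn 5000,
 * pageblock_start_pfn(5000) rounds down to 4608 and pageblock_end_pfn(5000)
 * rounds up to 5120, so the macros delimit the [4608, 5120) pageblock
 * containing that pfn.
 */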

/*
 * Page order with respect to which proactive compaction
 * calculates external fragmentation, which is used as
 * the "fragmentation score" of a node/zone.
 */
#if defined CONFIG_TRANSPARENT_HUGEPAGE
#define COMPACTION_HPAGE_ORDER	HPAGE_PMD_ORDER
#elif defined CONFIG_HUGETLBFS
#define COMPACTION_HPAGE_ORDER	HUGETLB_PAGE_ORDER
#else
#define COMPACTION_HPAGE_ORDER	(PMD_SHIFT - PAGE_SHIFT)
#endif

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}

#ifdef CONFIG_COMPACTION
bool PageMovable(struct page *page)
{
	const struct movable_operations *mops;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return false;

	mops = page_movable_ops(page);
	if (mops)
		return true;

	return false;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, const struct movable_operations *mops)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mops & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mops | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * This page still has the type of a movable page, but it's
	 * actually not movable any more.
	 */
	page->mapping = (void *)PAGE_MAPPING_MOVABLE;
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
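
/*
 * Illustrative note (a sketch of the mechanism implemented below, not part
 * of the original text): each failed compaction bumps compact_defer_shift,
 * so successive failures let roughly 2, 4, 8, ... allocation attempts skip
 * compaction, capped at 1 << COMPACT_MAX_DEFER_SHIFT (64). A success, or an
 * expected success, resets the backoff via compaction_defer_reset().
 */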

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered >= defer_limit) {
		zone->compact_considered = defer_limit;
		return false;
	}

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
static bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (check_source && PageLRU(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		if (check_target && PageBuddy(page)) {
			clear_pageblock_skip(page);
			return true;
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for readers/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending.
 * Returns false when compaction can continue.
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}
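
/*
 * Usage note (illustrative, not part of the original file): the scanners
 * below invoke this roughly every COMPACT_CLUSTER_MAX pfns, e.g.
 *
 *	if (!(blockpfn % COMPACT_CLUSTER_MAX) &&
 *	    compact_unlock_should_abort(&cc->zone->lock, flags, &locked, cc))
 *		break;
 *
 * so IRQs are re-enabled and need_resched() is honoured periodically even
 * while a pageblock is being scanned under the zone lock.
 */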

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if fatal signal
		 * pending.
		 */
		if (!(blockpfn % COMPACT_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking. */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = buddy_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		nr_scanned += isolated - 1;
		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(pg_data_t *pgdat)
{
	bool too_many;

	unsigned long active, inactive, isolated;

	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
			node_page_state(pgdat, NR_INACTIVE_ANON);
	active = node_page_state(pgdat, NR_ACTIVE_FILE) +
			node_page_state(pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
			node_page_state(pgdat, NR_ISOLATED_ANON);

	too_many = isolated > (inactive + active) / 2;
	if (!too_many)
		wake_throttle_isolated(pgdat);

	return too_many;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @mode:	Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within the same pageblock.
 * Returns errno, like -EAGAIN or -EINTR in case of e.g. a pending signal or
 * congestion, -ENOMEM in case we could not allocate a page, or 0.
 * cc->migrate_pfn will contain the next pfn to scan.
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly.
 */
static int
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t mode)
{
	pg_data_t *pgdat = cc->zone->zone_pgdat;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	struct lruvec *locked = NULL;
	struct page *page = NULL, *valid_page = NULL;
	struct address_space *mapping;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;
	int ret = 0;

	cc->migrate_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(pgdat))) {
		/* stop isolation if there are still pages not migrated */
		if (cc->nr_migratepages)
			return -EAGAIN;

		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return -EAGAIN;

		reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);

		if (fatal_signal_pending(current))
			return -EINTR;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % COMPACT_CLUSTER_MAX)) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}

			if (fatal_signal_pending(current)) {
				cc->contended = true;
				ret = -EINTR;

				goto fatal_pending;
			}

			cond_resched();
		}

		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!isolation_suitable(cc, page)) {
				low_pfn = end_pfn;
				page = NULL;
				goto isolate_abort;
			}
			valid_page = page;
		}

		if (PageHuge(page) && cc->alloc_contig) {
			ret = isolate_or_dissolve_huge_page(page, &cc->migratepages);

			/*
			 * Fail isolation in case isolate_or_dissolve_huge_page()
			 * reports an error. In case of -ENOMEM, abort right away.
			 */
			if (ret < 0) {
				/* Do not report -EBUSY down the chain */
				if (ret == -EBUSY)
					ret = 0;
				low_pfn += compound_nr(page) - 1;
				goto isolate_fail;
			}

			if (PageHuge(page)) {
				/*
				 * Hugepage was successfully isolated and placed
				 * on the cc->migratepages list.
				 */
				low_pfn += compound_nr(page) - 1;
				goto isolate_success_no_list;
			}

			/*
			 * Ok, the hugepage was dissolved. Now these pages are
			 * Buddy and cannot be re-allocated because they are
			 * isolated. Fall-through as the check below handles
			 * Buddy pages.
			 */
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = buddy_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted unless we are attempting
		 * an allocation much larger than the huge page size (eg CMA).
		 * We can potentially save a lot of iterations if we skip them
		 * at once. The check is racy, but we can consider only valid
		 * values and the only danger is skipping too much.
		 */
		if (PageCompound(page) && !cc->alloc_contig) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					unlock_page_lruvec_irqrestore(locked, flags);
					locked = NULL;
				}

				if (!isolate_movable_page(page, mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		mapping = page_mapping(page);
		if (!mapping && page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow migrating anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && mapping)
			goto isolate_fail;

		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		if (unlikely(!get_page_unless_zero(page)))
			goto isolate_fail;

		/* Only take pages on LRU: a check now makes later tests safe */
		if (!PageLRU(page))
			goto isolate_fail_put;

		/* Compaction might skip unevictable pages but CMA takes them */
		if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page))
			goto isolate_fail_put;

		/*
		 * To minimise LRU disruption, the caller can indicate with
		 * ISOLATE_ASYNC_MIGRATE that it only wants to isolate pages
		 * it will be able to migrate without blocking - clean pages
		 * for the most part. PageWriteback would require blocking.
		 */
		if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page))
			goto isolate_fail_put;

		if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) {
			bool migrate_dirty;

			/*
			 * Only pages without mappings or that have a
			 * ->migrate_folio callback are possible to migrate
			 * without blocking. However, we can be racing with
			 * truncation so it's necessary to lock the page
			 * to stabilise the mapping as truncation holds
			 * the page lock until after the page is removed
			 * from the page cache.
			 */
			if (!trylock_page(page))
				goto isolate_fail_put;

			mapping = page_mapping(page);
			migrate_dirty = !mapping ||
					mapping->a_ops->migrate_folio;
			unlock_page(page);
			if (!migrate_dirty)
				goto isolate_fail_put;
		}

		/* Try isolate the page */
		if (!TestClearPageLRU(page))
			goto isolate_fail_put;

		lruvec = folio_lruvec(page_folio(page));

		/* If we already hold the lock, we can skip some rechecking */
		if (lruvec != locked) {
			if (locked)
				unlock_page_lruvec_irqrestore(locked, flags);

			compact_lock_irqsave(&lruvec->lru_lock, &flags, cc);
			locked = lruvec;

			lruvec_memcg_debug(lruvec, page_folio(page));

			/* Try get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page) && !cc->alloc_contig)) {
				low_pfn += compound_nr(page) - 1;
				SetPageLRU(page);
				goto isolate_fail_put;
			}
		}

		/* The whole page is taken off the LRU; skip the tail pages. */
		if (PageCompound(page))
			low_pfn += compound_nr(page) - 1;

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec);
		mod_node_page_state(page_pgdat(page),
				NR_ISOLATED_ANON + page_is_file_lru(page),
				thp_nr_pages(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
isolate_success_no_list:
		cc->nr_migratepages += compound_nr(page);
		nr_isolated += compound_nr(page);
		nr_scanned += compound_nr(page) - 1;

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;

isolate_fail_put:
		/* Avoid potential deadlock in freeing page under lru_lock */
		if (locked) {
			unlock_page_lruvec_irqrestore(locked, flags);
			locked = NULL;
		}
		put_page(page);

isolate_fail:
		if (!skip_on_failure && ret != -ENOMEM)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				unlock_page_lruvec_irqrestore(locked, flags);
				locked = NULL;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}

		if (ret == -ENOMEM)
			break;
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	page = NULL;

isolate_abort:
	if (locked)
		unlock_page_lruvec_irqrestore(locked, flags);
	if (page) {
		SetPageLRU(page);
		put_page(page);
	}

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	cc->migrate_pfn = low_pfn;

	return ret;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns -EAGAIN when contended, -EINTR in case of a signal pending, -ENOMEM
 * in case we could not allocate a page, or 0.
 */
int
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
			unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;
	int ret = 0;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		ret = isolate_migratepages_block(cc, pfn, block_end_pfn,
						 ISOLATE_UNEVICTABLE);

		if (ret)
			break;

		if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX)
			break;
	}

	return ret;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
					struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (buddy_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list such that the unscanned pages are scanned
 * first on the next iteration of the free scanner
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_last(freelist, &freepage->lru)) {
		list_cut_before(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_first(freelist, &freepage->lru)) {
		list_cut_position(&sublist, freelist, &freepage->lru);
		list_splice_tail(&sublist, freelist);
	}
}

static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
	unsigned long start_pfn, end_pfn;
	struct page *page;

	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;

	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;

	/* Pageblock boundaries */
	start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));

	page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
	if (!page)
		return;

	/* Scan before */
	if (start_pfn != pfn) {
		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
		if (cc->nr_freepages >= cc->nr_migratepages)
			return;
	}

	/* Scan after */
	start_pfn = pfn + nr_isolated;
	if (start_pfn < end_pfn)
		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);

	/* Skip this pageblock in the future as it's full or nearly full */
	if (cc->nr_freepages < cc->nr_migratepages)
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}

static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0;
	unsigned long low_pfn, min_pfn, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return cc->free_pfn;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;
		unsigned long high_pfn = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry_reverse(freepage, freelist, lru) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			if (pfn >= highest)
				highest = max(pageblock_start_pfn(pfn),
					      cc->zone->zone_start_pfn);

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}

		/* Use a minimum pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so that a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				nr_scanned += nr_isolated - 1;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = max(1U, limit >> 1);
	}

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest >= min_pfn) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pageblock_pfn_to_page(min_pfn,
						min(pageblock_end_pfn(min_pfn),
						    zone_end_pfn(cc->zone)),
						cc->zone);
					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return cc->free_pfn;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn, nr_isolated);
	return low_pfn;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		goto splitmap;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;

splitmap:
	/* __isolate_free_page() does not map the pages */
	split_map_pages(freelist);
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNEVICTABLE_DEFAULT;

static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	if (!cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;

	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
		return cc->migrate_pfn;

	cc->migrate_pfn = cc->fast_start_pfn;
	cc->fast_start_pfn = ULONG_MAX;

	return cc->migrate_pfn;
}

/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;
	bool found_block = false;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try and pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);

	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, lru) {
			unsigned long free_pfn;

			if (nr_scanned++ >= limit) {
				move_freelist_tail(freelist, freepage);
				break;
			}

			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage))
					continue;

				/* Reorder so that a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				if (pfn < cc->zone->zone_start_pfn)
					pfn = cc->zone->zone_start_pfn;
				cc->fast_search_fail = 0;
				found_block = true;
				set_pageblock_skip(freepage);
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (!found_block) {
		cc->fast_search_fail++;
		pfn = reinit_migrate_pfn(cc);
	}
	return pfn;
}

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;

	/*
	 * fast_find_migrateblock() marks a pageblock skipped, so to avoid
	 * the isolation_suitable check below, check whether the fast
	 * search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			fast_find_block = false,
			cc->migrate_pfn = low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule.
		 */
		if (!(low_pfn % (COMPACT_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn,
						block_end_pfn, cc->zone);
		if (!page)
			continue;

		/*
		 * If isolation recently failed, do not retry. Only check the
		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
		 * to be visited multiple times. Assume skip was checked
		 * before making it "skip" so other compaction instances do
		 * not scan the same block.
1937 */ 1938 if (IS_ALIGNED(low_pfn, pageblock_nr_pages) && 1939 !fast_find_block && !isolation_suitable(cc, page)) 1940 continue; 1941 1942 /* 1943 * For async direct compaction, only scan the pageblocks of the 1944 * same migratetype without huge pages. Async direct compaction 1945 * is optimistic to see if the minimum amount of work satisfies 1946 * the allocation. The cached PFN is updated as it's possible 1947 * that all remaining blocks between source and target are 1948 * unsuitable and the compaction scanners fail to meet. 1949 */ 1950 if (!suitable_migration_source(cc, page)) { 1951 update_cached_migrate(cc, block_end_pfn); 1952 continue; 1953 } 1954 1955 /* Perform the isolation */ 1956 if (isolate_migratepages_block(cc, low_pfn, block_end_pfn, 1957 isolate_mode)) 1958 return ISOLATE_ABORT; 1959 1960 /* 1961 * Either we isolated something and will proceed with migration, or 1962 * we failed and compact_zone() should decide whether to 1963 * continue or not. 1964 */ 1965 break; 1966 } 1967 1968 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1969 } 1970 1971 /* 1972 * order == -1 is expected when compacting via 1973 * /proc/sys/vm/compact_memory 1974 */ 1975 static inline bool is_via_compact_memory(int order) 1976 { 1977 return order == -1; 1978 } 1979 1980 static bool kswapd_is_running(pg_data_t *pgdat) 1981 { 1982 return pgdat->kswapd && task_is_running(pgdat->kswapd); 1983 } 1984 1985 /* 1986 * A zone's fragmentation score is the external fragmentation with respect 1987 * to COMPACTION_HPAGE_ORDER. It returns a value in the range [0, 100]. 1988 */ 1989 static unsigned int fragmentation_score_zone(struct zone *zone) 1990 { 1991 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); 1992 } 1993 1994 /* 1995 * A weighted zone's fragmentation score is the external fragmentation 1996 * with respect to COMPACTION_HPAGE_ORDER, scaled by the zone's size. It 1997 * returns a value in the range [0, 100]. 1998 * 1999 * The scaling factor ensures that proactive compaction focuses on larger 2000 * zones like ZONE_NORMAL, rather than smaller, specialized zones like 2001 * ZONE_DMA32. For smaller zones, the score value remains close to zero, 2002 * and thus never exceeds the high threshold for proactive compaction. 2003 */ 2004 static unsigned int fragmentation_score_zone_weighted(struct zone *zone) 2005 { 2006 unsigned long score; 2007 2008 score = zone->present_pages * fragmentation_score_zone(zone); 2009 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); 2010 } 2011 2012 /* 2013 * The per-node proactive (background) compaction process is started by its 2014 * corresponding kcompactd thread when the node's fragmentation score 2015 * exceeds the high threshold. The compaction process remains active until 2016 * the node's score falls below the low threshold, or one of the back-off 2017 * conditions is met. 2018 */ 2019 static unsigned int fragmentation_score_node(pg_data_t *pgdat) 2020 { 2021 unsigned int score = 0; 2022 int zoneid; 2023 2024 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2025 struct zone *zone; 2026 2027 zone = &pgdat->node_zones[zoneid]; 2028 score += fragmentation_score_zone_weighted(zone); 2029 } 2030 2031 return score; 2032 } 2033 2034 static unsigned int fragmentation_score_wmark(pg_data_t *pgdat, bool low) 2035 { 2036 unsigned int wmark_low; 2037 2038 /* 2039 * Cap the low watermark to avoid excessive compaction 2040 * activity in case a user sets the proactiveness tunable 2041 * close to 100 (maximum).
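For example, the default proactiveness of 20 gives wmark_low = 80 and wmark_high = 90, while a proactiveness of 98 is clamped to wmark_low = 5 and wmark_high = 15.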
2042 */ 2043 wmark_low = max(100U - sysctl_compaction_proactiveness, 5U); 2044 return low ? wmark_low : min(wmark_low + 10, 100U); 2045 } 2046 2047 static bool should_proactive_compact_node(pg_data_t *pgdat) 2048 { 2049 int wmark_high; 2050 2051 if (!sysctl_compaction_proactiveness || kswapd_is_running(pgdat)) 2052 return false; 2053 2054 wmark_high = fragmentation_score_wmark(pgdat, false); 2055 return fragmentation_score_node(pgdat) > wmark_high; 2056 } 2057 2058 static enum compact_result __compact_finished(struct compact_control *cc) 2059 { 2060 unsigned int order; 2061 const int migratetype = cc->migratetype; 2062 int ret; 2063 2064 /* Compaction run completes if the migrate and free scanner meet */ 2065 if (compact_scanners_met(cc)) { 2066 /* Let the next compaction start anew. */ 2067 reset_cached_positions(cc->zone); 2068 2069 /* 2070 * Mark that the PG_migrate_skip information should be cleared 2071 * by kswapd when it goes to sleep. kcompactd does not set the 2072 * flag itself as the decision to clear it should be based 2073 * directly on an allocation request. 2074 */ 2075 if (cc->direct_compaction) 2076 cc->zone->compact_blockskip_flush = true; 2077 2078 if (cc->whole_zone) 2079 return COMPACT_COMPLETE; 2080 else 2081 return COMPACT_PARTIAL_SKIPPED; 2082 } 2083 2084 if (cc->proactive_compaction) { 2085 int score, wmark_low; 2086 pg_data_t *pgdat; 2087 2088 pgdat = cc->zone->zone_pgdat; 2089 if (kswapd_is_running(pgdat)) 2090 return COMPACT_PARTIAL_SKIPPED; 2091 2092 score = fragmentation_score_zone(cc->zone); 2093 wmark_low = fragmentation_score_wmark(pgdat, true); 2094 2095 if (score > wmark_low) 2096 ret = COMPACT_CONTINUE; 2097 else 2098 ret = COMPACT_SUCCESS; 2099 2100 goto out; 2101 } 2102 2103 if (is_via_compact_memory(cc->order)) 2104 return COMPACT_CONTINUE; 2105 2106 /* 2107 * Always finish scanning a pageblock to reduce the possibility of 2108 * fallbacks in the future. This is particularly important when the 2109 * migration source is unmovable/reclaimable, but it's not worth 2110 * special casing. 2111 */ 2112 if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) 2113 return COMPACT_CONTINUE; 2114 2115 /* Direct compactor: Is a suitable page free? */ 2116 ret = COMPACT_NO_SUITABLE_PAGE; 2117 for (order = cc->order; order < MAX_ORDER; order++) { 2118 struct free_area *area = &cc->zone->free_area[order]; 2119 bool can_steal; 2120 2121 /* Job done if a free page of the right migratetype is available */ 2122 if (!free_area_empty(area, migratetype)) 2123 return COMPACT_SUCCESS; 2124 2125 #ifdef CONFIG_CMA 2126 /* MIGRATE_MOVABLE can fall back on MIGRATE_CMA */ 2127 if (migratetype == MIGRATE_MOVABLE && 2128 !free_area_empty(area, MIGRATE_CMA)) 2129 return COMPACT_SUCCESS; 2130 #endif 2131 /* 2132 * Job done if allocation would steal freepages from 2133 * other migratetype buddy lists. 2134 */ 2135 if (find_suitable_fallback(area, order, migratetype, 2136 true, &can_steal) != -1) 2137 /* 2138 * Movable pages are OK in any pageblock. If we are 2139 * stealing for a non-movable allocation, make sure 2140 * we finish compacting the current pageblock first 2141 * (which is assured by the above migrate_pfn align 2142 * check) so it is as free as possible and we won't 2143 * have to steal another one soon.
2144 */ 2145 return COMPACT_SUCCESS; 2146 } 2147 2148 out: 2149 if (cc->contended || fatal_signal_pending(current)) 2150 ret = COMPACT_CONTENDED; 2151 2152 return ret; 2153 } 2154 2155 static enum compact_result compact_finished(struct compact_control *cc) 2156 { 2157 int ret; 2158 2159 ret = __compact_finished(cc); 2160 trace_mm_compaction_finished(cc->zone, cc->order, ret); 2161 if (ret == COMPACT_NO_SUITABLE_PAGE) 2162 ret = COMPACT_CONTINUE; 2163 2164 return ret; 2165 } 2166 2167 static enum compact_result __compaction_suitable(struct zone *zone, int order, 2168 unsigned int alloc_flags, 2169 int highest_zoneidx, 2170 unsigned long wmark_target) 2171 { 2172 unsigned long watermark; 2173 2174 if (is_via_compact_memory(order)) 2175 return COMPACT_CONTINUE; 2176 2177 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); 2178 /* 2179 * If watermarks for high-order allocation are already met, there 2180 * should be no need for compaction at all. 2181 */ 2182 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, 2183 alloc_flags)) 2184 return COMPACT_SUCCESS; 2185 2186 /* 2187 * Watermarks for order-0 must be met for compaction to be able to 2188 * isolate free pages for migration targets. This means that the 2189 * watermark and alloc_flags have to match, or be more pessimistic than 2190 * the check in __isolate_free_page(). We don't use the direct 2191 * compactor's alloc_flags, as they are not relevant for freepage 2192 * isolation. We however do use the direct compactor's highest_zoneidx 2193 * to skip over zones where lowmem reserves would prevent allocation 2194 * even if compaction succeeds. 2195 * For costly orders, we require low watermark instead of min for 2196 * compaction to proceed to increase its chances. 2197 * ALLOC_CMA is used, as pages in CMA pageblocks are considered 2198 * suitable migration targets 2199 */ 2200 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 2201 low_wmark_pages(zone) : min_wmark_pages(zone); 2202 watermark += compact_gap(order); 2203 if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, 2204 ALLOC_CMA, wmark_target)) 2205 return COMPACT_SKIPPED; 2206 2207 return COMPACT_CONTINUE; 2208 } 2209 2210 /* 2211 * compaction_suitable: Is this suitable to run compaction on this zone now? 2212 * Returns 2213 * COMPACT_SKIPPED - If there are too few free pages for compaction 2214 * COMPACT_SUCCESS - If the allocation would succeed without compaction 2215 * COMPACT_CONTINUE - If compaction should run now 2216 */ 2217 enum compact_result compaction_suitable(struct zone *zone, int order, 2218 unsigned int alloc_flags, 2219 int highest_zoneidx) 2220 { 2221 enum compact_result ret; 2222 int fragindex; 2223 2224 ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, 2225 zone_page_state(zone, NR_FREE_PAGES)); 2226 /* 2227 * fragmentation index determines if allocation failures are due to 2228 * low memory or external fragmentation 2229 * 2230 * index of -1000 would imply allocations might succeed depending on 2231 * watermarks, but we already failed the high-order watermark check 2232 * index towards 0 implies failure is due to lack of memory 2233 * index towards 1000 implies failure is due to fragmentation 2234 * 2235 * Only compact if a failure would be due to fragmentation. Also 2236 * ignore fragindex for non-costly orders where the alternative to 2237 * a successful reclaim/compaction is OOM. 
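With the default vm.extfrag_threshold of 500, for example, a costly-order failure with a fragmentation index of 650 proceeds to compaction, while an index of 300 is treated as a genuine lack of memory and compaction is skipped.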
The fragindex and the 2238 * vm.extfrag_threshold sysctl are meant as a heuristic to prevent 2239 * excessive compaction for costly orders, but this should not be at the 2240 * expense of system stability. 2241 */ 2242 if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { 2243 fragindex = fragmentation_index(zone, order); 2244 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 2245 ret = COMPACT_NOT_SUITABLE_ZONE; 2246 } 2247 2248 trace_mm_compaction_suitable(zone, order, ret); 2249 if (ret == COMPACT_NOT_SUITABLE_ZONE) 2250 ret = COMPACT_SKIPPED; 2251 2252 return ret; 2253 } 2254 2255 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, 2256 int alloc_flags) 2257 { 2258 struct zone *zone; 2259 struct zoneref *z; 2260 2261 /* 2262 * Make sure at least one zone would pass __compaction_suitable if we continue 2263 * retrying the reclaim. 2264 */ 2265 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2266 ac->highest_zoneidx, ac->nodemask) { 2267 unsigned long available; 2268 enum compact_result compact_result; 2269 2270 /* 2271 * Do not consider all the reclaimable memory because we do not 2272 * want to thrash just for a single high-order allocation which 2273 * is not even guaranteed to appear even if __compaction_suitable 2274 * is happy about the watermark check. 2275 */ 2276 available = zone_reclaimable_pages(zone) / order; 2277 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); 2278 compact_result = __compaction_suitable(zone, order, alloc_flags, 2279 ac->highest_zoneidx, available); 2280 if (compact_result == COMPACT_CONTINUE) 2281 return true; 2282 } 2283 2284 return false; 2285 } 2286 2287 static enum compact_result 2288 compact_zone(struct compact_control *cc, struct capture_control *capc) 2289 { 2290 enum compact_result ret; 2291 unsigned long start_pfn = cc->zone->zone_start_pfn; 2292 unsigned long end_pfn = zone_end_pfn(cc->zone); 2293 unsigned long last_migrated_pfn; 2294 const bool sync = cc->mode != MIGRATE_ASYNC; 2295 bool update_cached; 2296 unsigned int nr_succeeded = 0; 2297 2298 /* 2299 * These counters track activities during zone compaction. Initialize 2300 * them before compacting a new zone. 2301 */ 2302 cc->total_migrate_scanned = 0; 2303 cc->total_free_scanned = 0; 2304 cc->nr_migratepages = 0; 2305 cc->nr_freepages = 0; 2306 INIT_LIST_HEAD(&cc->freepages); 2307 INIT_LIST_HEAD(&cc->migratepages); 2308 2309 cc->migratetype = gfp_migratetype(cc->gfp_mask); 2310 ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, 2311 cc->highest_zoneidx); 2312 /* Compaction is likely to fail */ 2313 if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) 2314 return ret; 2315 2316 /* huh, compaction_suitable is returning something unexpected */ 2317 VM_BUG_ON(ret != COMPACT_CONTINUE); 2318 2319 /* 2320 * Clear pageblock skip if there were failures recently and compaction 2321 * is about to be retried after being deferred. 2322 */ 2323 if (compaction_restarting(cc->zone, cc->order)) 2324 __reset_isolation_suitable(cc->zone); 2325 2326 /* 2327 * Set up to move all movable pages to the end of the zone. Use cached 2328 * information on where the scanners should start (unless we explicitly 2329 * want to compact the whole zone), but check that it is initialised 2330 * by ensuring the values are within zone boundaries.
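Out-of-range cached values are reset below: the free scanner to the start of the last pageblock and the migrate scanner to the zone start.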
2331 */ 2332 cc->fast_start_pfn = 0; 2333 if (cc->whole_zone) { 2334 cc->migrate_pfn = start_pfn; 2335 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 2336 } else { 2337 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; 2338 cc->free_pfn = cc->zone->compact_cached_free_pfn; 2339 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { 2340 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); 2341 cc->zone->compact_cached_free_pfn = cc->free_pfn; 2342 } 2343 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { 2344 cc->migrate_pfn = start_pfn; 2345 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; 2346 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; 2347 } 2348 2349 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) 2350 cc->whole_zone = true; 2351 } 2352 2353 last_migrated_pfn = 0; 2354 2355 /* 2356 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on 2357 * the basis that some migrations will fail in ASYNC mode. However, 2358 * if the cached PFNs match and pageblocks are skipped due to having 2359 * no isolation candidates, then the sync state does not matter. 2360 * Until a pageblock with isolation candidates is found, keep the 2361 * cached PFNs in sync to avoid revisiting the same blocks. 2362 */ 2363 update_cached = !sync && 2364 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; 2365 2366 trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); 2367 2368 /* lru_add_drain_all() could be expensive as it involves other CPUs, so drain only the local CPU */ 2369 lru_add_drain(); 2370 2371 while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) { 2372 int err; 2373 unsigned long iteration_start_pfn = cc->migrate_pfn; 2374 2375 /* 2376 * Avoid multiple rescans, which can happen if a page cannot be 2377 * isolated (dirty/writeback in async mode) or if the migrated 2378 * pages are being allocated before the pageblock is cleared. 2379 * The first rescan will capture the entire pageblock for 2380 * migration. If it fails, it'll be marked skip and scanning 2381 * will proceed as normal. 2382 */ 2383 cc->rescan = false; 2384 if (pageblock_start_pfn(last_migrated_pfn) == 2385 pageblock_start_pfn(iteration_start_pfn)) { 2386 cc->rescan = true; 2387 } 2388 2389 switch (isolate_migratepages(cc)) { 2390 case ISOLATE_ABORT: 2391 ret = COMPACT_CONTENDED; 2392 putback_movable_pages(&cc->migratepages); 2393 cc->nr_migratepages = 0; 2394 goto out; 2395 case ISOLATE_NONE: 2396 if (update_cached) { 2397 cc->zone->compact_cached_migrate_pfn[1] = 2398 cc->zone->compact_cached_migrate_pfn[0]; 2399 } 2400 2401 /* 2402 * We haven't isolated and migrated anything, but 2403 * there might still be unflushed migrations from 2404 * the previous cc->order aligned block.
2405 */ 2406 goto check_drain; 2407 case ISOLATE_SUCCESS: 2408 update_cached = false; 2409 last_migrated_pfn = iteration_start_pfn; 2410 } 2411 2412 err = migrate_pages(&cc->migratepages, compaction_alloc, 2413 compaction_free, (unsigned long)cc, cc->mode, 2414 MR_COMPACTION, &nr_succeeded); 2415 2416 trace_mm_compaction_migratepages(cc, nr_succeeded); 2417 2418 /* All pages were either migrated or will be released */ 2419 cc->nr_migratepages = 0; 2420 if (err) { 2421 putback_movable_pages(&cc->migratepages); 2422 /* 2423 * migrate_pages() may return -ENOMEM when scanners meet 2424 * and we want compact_finished() to detect it 2425 */ 2426 if (err == -ENOMEM && !compact_scanners_met(cc)) { 2427 ret = COMPACT_CONTENDED; 2428 goto out; 2429 } 2430 /* 2431 * We failed to migrate at least one page in the current 2432 * order-aligned block, so skip the rest of it. 2433 */ 2434 if (cc->direct_compaction && 2435 (cc->mode == MIGRATE_ASYNC)) { 2436 cc->migrate_pfn = block_end_pfn( 2437 cc->migrate_pfn - 1, cc->order); 2438 /* Draining pcplists is useless in this case */ 2439 last_migrated_pfn = 0; 2440 } 2441 } 2442 2443 check_drain: 2444 /* 2445 * Has the migration scanner moved away from the previous 2446 * cc->order aligned block where we migrated from? If yes, 2447 * flush the pages that were freed, so that they can merge and 2448 * compact_finished() can detect immediately if allocation 2449 * would succeed. 2450 */ 2451 if (cc->order > 0 && last_migrated_pfn) { 2452 unsigned long current_block_start = 2453 block_start_pfn(cc->migrate_pfn, cc->order); 2454 2455 if (last_migrated_pfn < current_block_start) { 2456 lru_add_drain_cpu_zone(cc->zone); 2457 /* No more flushing until we migrate again */ 2458 last_migrated_pfn = 0; 2459 } 2460 } 2461 2462 /* Stop if a page has been captured */ 2463 if (capc && capc->page) { 2464 ret = COMPACT_SUCCESS; 2465 break; 2466 } 2467 } 2468 2469 out: 2470 /* 2471 * Release free pages and update where the free scanner should restart, 2472 * so we don't leave any returned pages behind in the next attempt. 2473 */ 2474 if (cc->nr_freepages > 0) { 2475 unsigned long free_pfn = release_freepages(&cc->freepages); 2476 2477 cc->nr_freepages = 0; 2478 VM_BUG_ON(free_pfn == 0); 2479 /* The cached pfn is always the first in a pageblock */ 2480 free_pfn = pageblock_start_pfn(free_pfn); 2481 /* 2482 * Only go back, not forward. The cached pfn might have been 2483 * already reset to zone end in compact_finished() 2484 */ 2485 if (free_pfn > cc->zone->compact_cached_free_pfn) 2486 cc->zone->compact_cached_free_pfn = free_pfn; 2487 } 2488 2489 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); 2490 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); 2491 2492 trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); 2493 2494 return ret; 2495 } 2496 2497 static enum compact_result compact_zone_order(struct zone *zone, int order, 2498 gfp_t gfp_mask, enum compact_priority prio, 2499 unsigned int alloc_flags, int highest_zoneidx, 2500 struct page **capture) 2501 { 2502 enum compact_result ret; 2503 struct compact_control cc = { 2504 .order = order, 2505 .search_order = order, 2506 .gfp_mask = gfp_mask, 2507 .zone = zone, 2508 .mode = (prio == COMPACT_PRIO_ASYNC) ? 
2509 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT, 2510 .alloc_flags = alloc_flags, 2511 .highest_zoneidx = highest_zoneidx, 2512 .direct_compaction = true, 2513 .whole_zone = (prio == MIN_COMPACT_PRIORITY), 2514 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY), 2515 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY) 2516 }; 2517 struct capture_control capc = { 2518 .cc = &cc, 2519 .page = NULL, 2520 }; 2521 2522 /* 2523 * Make sure the structs are really initialized before we expose the 2524 * capture control, in case we are interrupted and the interrupt handler 2525 * frees a page. 2526 */ 2527 barrier(); 2528 WRITE_ONCE(current->capture_control, &capc); 2529 2530 ret = compact_zone(&cc, &capc); 2531 2532 VM_BUG_ON(!list_empty(&cc.freepages)); 2533 VM_BUG_ON(!list_empty(&cc.migratepages)); 2534 2535 /* 2536 * Make sure we hide capture control first before we read the captured 2537 * page pointer, otherwise an interrupt could free and capture a page 2538 * and we would leak it. 2539 */ 2540 WRITE_ONCE(current->capture_control, NULL); 2541 *capture = READ_ONCE(capc.page); 2542 /* 2543 * Technically, it is also possible that compaction is skipped but 2544 * the page is still captured out of luck(IRQ came and freed the page). 2545 * Returning COMPACT_SUCCESS in such cases helps in properly accounting 2546 * the COMPACT[STALL|FAIL] when compaction is skipped. 2547 */ 2548 if (*capture) 2549 ret = COMPACT_SUCCESS; 2550 2551 return ret; 2552 } 2553 2554 int sysctl_extfrag_threshold = 500; 2555 2556 /** 2557 * try_to_compact_pages - Direct compact to satisfy a high-order allocation 2558 * @gfp_mask: The GFP mask of the current allocation 2559 * @order: The order of the current allocation 2560 * @alloc_flags: The allocation flags of the current allocation 2561 * @ac: The context of current allocation 2562 * @prio: Determines how hard direct compaction should try to succeed 2563 * @capture: Pointer to free page created by compaction will be stored here 2564 * 2565 * This is the main entry point for direct page compaction. 2566 */ 2567 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 2568 unsigned int alloc_flags, const struct alloc_context *ac, 2569 enum compact_priority prio, struct page **capture) 2570 { 2571 int may_perform_io = (__force int)(gfp_mask & __GFP_IO); 2572 struct zoneref *z; 2573 struct zone *zone; 2574 enum compact_result rc = COMPACT_SKIPPED; 2575 2576 /* 2577 * Check if the GFP flags allow compaction - GFP_NOIO is really 2578 * tricky context because the migration might require IO 2579 */ 2580 if (!may_perform_io) 2581 return COMPACT_SKIPPED; 2582 2583 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); 2584 2585 /* Compact each zone in the list */ 2586 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, 2587 ac->highest_zoneidx, ac->nodemask) { 2588 enum compact_result status; 2589 2590 if (prio > MIN_COMPACT_PRIORITY 2591 && compaction_deferred(zone, order)) { 2592 rc = max_t(enum compact_result, COMPACT_DEFERRED, rc); 2593 continue; 2594 } 2595 2596 status = compact_zone_order(zone, order, gfp_mask, prio, 2597 alloc_flags, ac->highest_zoneidx, capture); 2598 rc = max(status, rc); 2599 2600 /* The allocation should succeed, stop compacting */ 2601 if (status == COMPACT_SUCCESS) { 2602 /* 2603 * We think the allocation will succeed in this zone, 2604 * but it is not certain, hence the false. The caller 2605 * will repeat this with true if allocation indeed 2606 * succeeds in this zone. 
2607 */ 2608 compaction_defer_reset(zone, order, false); 2609 2610 break; 2611 } 2612 2613 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE || 2614 status == COMPACT_PARTIAL_SKIPPED)) 2615 /* 2616 * We think that allocation won't succeed in this zone 2617 * so we defer compaction there. If it ends up 2618 * succeeding after all, it will be reset. 2619 */ 2620 defer_compaction(zone, order); 2621 2622 /* 2623 * We might have stopped compacting due to need_resched() in 2624 * async compaction, or due to a fatal signal detected. In that 2625 * case do not try further zones 2626 */ 2627 if ((prio == COMPACT_PRIO_ASYNC && need_resched()) 2628 || fatal_signal_pending(current)) 2629 break; 2630 } 2631 2632 return rc; 2633 } 2634 2635 /* 2636 * Compact all zones within a node till each zone's fragmentation score 2637 * reaches within proactive compaction thresholds (as determined by the 2638 * proactiveness tunable). 2639 * 2640 * It is possible that the function returns before reaching score targets 2641 * due to various back-off conditions, such as, contention on per-node or 2642 * per-zone locks. 2643 */ 2644 static void proactive_compact_node(pg_data_t *pgdat) 2645 { 2646 int zoneid; 2647 struct zone *zone; 2648 struct compact_control cc = { 2649 .order = -1, 2650 .mode = MIGRATE_SYNC_LIGHT, 2651 .ignore_skip_hint = true, 2652 .whole_zone = true, 2653 .gfp_mask = GFP_KERNEL, 2654 .proactive_compaction = true, 2655 }; 2656 2657 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2658 zone = &pgdat->node_zones[zoneid]; 2659 if (!populated_zone(zone)) 2660 continue; 2661 2662 cc.zone = zone; 2663 2664 compact_zone(&cc, NULL); 2665 2666 VM_BUG_ON(!list_empty(&cc.freepages)); 2667 VM_BUG_ON(!list_empty(&cc.migratepages)); 2668 } 2669 } 2670 2671 /* Compact all zones within a node */ 2672 static void compact_node(int nid) 2673 { 2674 pg_data_t *pgdat = NODE_DATA(nid); 2675 int zoneid; 2676 struct zone *zone; 2677 struct compact_control cc = { 2678 .order = -1, 2679 .mode = MIGRATE_SYNC, 2680 .ignore_skip_hint = true, 2681 .whole_zone = true, 2682 .gfp_mask = GFP_KERNEL, 2683 }; 2684 2685 2686 for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) { 2687 2688 zone = &pgdat->node_zones[zoneid]; 2689 if (!populated_zone(zone)) 2690 continue; 2691 2692 cc.zone = zone; 2693 2694 compact_zone(&cc, NULL); 2695 2696 VM_BUG_ON(!list_empty(&cc.freepages)); 2697 VM_BUG_ON(!list_empty(&cc.migratepages)); 2698 } 2699 } 2700 2701 /* Compact all nodes in the system */ 2702 static void compact_nodes(void) 2703 { 2704 int nid; 2705 2706 /* Flush pending updates to the LRU lists */ 2707 lru_add_drain_all(); 2708 2709 for_each_online_node(nid) 2710 compact_node(nid); 2711 } 2712 2713 /* 2714 * Tunable for proactive compaction. It determines how 2715 * aggressively the kernel should compact memory in the 2716 * background. It takes values in the range [0, 100]. 
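A value of 0 disables proactive compaction entirely.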
2717 */ 2718 unsigned int __read_mostly sysctl_compaction_proactiveness = 20; 2719 2720 int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int write, 2721 void *buffer, size_t *length, loff_t *ppos) 2722 { 2723 int rc, nid; 2724 2725 rc = proc_dointvec_minmax(table, write, buffer, length, ppos); 2726 if (rc) 2727 return rc; 2728 2729 if (write && sysctl_compaction_proactiveness) { 2730 for_each_online_node(nid) { 2731 pg_data_t *pgdat = NODE_DATA(nid); 2732 2733 if (pgdat->proactive_compact_trigger) 2734 continue; 2735 2736 pgdat->proactive_compact_trigger = true; 2737 wake_up_interruptible(&pgdat->kcompactd_wait); 2738 } 2739 } 2740 2741 return 0; 2742 } 2743 2744 /* 2745 * This is the entry point for compacting all nodes via 2746 * /proc/sys/vm/compact_memory 2747 */ 2748 int sysctl_compaction_handler(struct ctl_table *table, int write, 2749 void *buffer, size_t *length, loff_t *ppos) 2750 { 2751 if (write) 2752 compact_nodes(); 2753 2754 return 0; 2755 } 2756 2757 #if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) 2758 static ssize_t compact_store(struct device *dev, 2759 struct device_attribute *attr, 2760 const char *buf, size_t count) 2761 { 2762 int nid = dev->id; 2763 2764 if (nid >= 0 && nid < nr_node_ids && node_online(nid)) { 2765 /* Flush pending updates to the LRU lists */ 2766 lru_add_drain_all(); 2767 2768 compact_node(nid); 2769 } 2770 2771 return count; 2772 } 2773 static DEVICE_ATTR_WO(compact); 2774 2775 int compaction_register_node(struct node *node) 2776 { 2777 return device_create_file(&node->dev, &dev_attr_compact); 2778 } 2779 2780 void compaction_unregister_node(struct node *node) 2781 { 2782 return device_remove_file(&node->dev, &dev_attr_compact); 2783 } 2784 #endif /* CONFIG_SYSFS && CONFIG_NUMA */ 2785 2786 static inline bool kcompactd_work_requested(pg_data_t *pgdat) 2787 { 2788 return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || 2789 pgdat->proactive_compact_trigger; 2790 } 2791 2792 static bool kcompactd_node_suitable(pg_data_t *pgdat) 2793 { 2794 int zoneid; 2795 struct zone *zone; 2796 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; 2797 2798 for (zoneid = 0; zoneid <= highest_zoneidx; zoneid++) { 2799 zone = &pgdat->node_zones[zoneid]; 2800 2801 if (!populated_zone(zone)) 2802 continue; 2803 2804 if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, 2805 highest_zoneidx) == COMPACT_CONTINUE) 2806 return true; 2807 } 2808 2809 return false; 2810 } 2811 2812 static void kcompactd_do_work(pg_data_t *pgdat) 2813 { 2814 /* 2815 * With no special task, compact all zones so that a page of requested 2816 * order is allocatable. 
2817 */ 2818 int zoneid; 2819 struct zone *zone; 2820 struct compact_control cc = { 2821 .order = pgdat->kcompactd_max_order, 2822 .search_order = pgdat->kcompactd_max_order, 2823 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, 2824 .mode = MIGRATE_SYNC_LIGHT, 2825 .ignore_skip_hint = false, 2826 .gfp_mask = GFP_KERNEL, 2827 }; 2828 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, 2829 cc.highest_zoneidx); 2830 count_compact_event(KCOMPACTD_WAKE); 2831 2832 for (zoneid = 0; zoneid <= cc.highest_zoneidx; zoneid++) { 2833 int status; 2834 2835 zone = &pgdat->node_zones[zoneid]; 2836 if (!populated_zone(zone)) 2837 continue; 2838 2839 if (compaction_deferred(zone, cc.order)) 2840 continue; 2841 2842 if (compaction_suitable(zone, cc.order, 0, zoneid) != 2843 COMPACT_CONTINUE) 2844 continue; 2845 2846 if (kthread_should_stop()) 2847 return; 2848 2849 cc.zone = zone; 2850 status = compact_zone(&cc, NULL); 2851 2852 if (status == COMPACT_SUCCESS) { 2853 compaction_defer_reset(zone, cc.order, false); 2854 } else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) { 2855 /* 2856 * Buddy pages may become stranded on pcps that could 2857 * otherwise coalesce on the zone's free area for 2858 * order >= cc.order. This is ratelimited by the 2859 * upcoming deferral. 2860 */ 2861 drain_all_pages(zone); 2862 2863 /* 2864 * We use sync migration mode here, so we defer like 2865 * sync direct compaction does. 2866 */ 2867 defer_compaction(zone, cc.order); 2868 } 2869 2870 count_compact_events(KCOMPACTD_MIGRATE_SCANNED, 2871 cc.total_migrate_scanned); 2872 count_compact_events(KCOMPACTD_FREE_SCANNED, 2873 cc.total_free_scanned); 2874 2875 VM_BUG_ON(!list_empty(&cc.freepages)); 2876 VM_BUG_ON(!list_empty(&cc.migratepages)); 2877 } 2878 2879 /* 2880 * Regardless of success, we are done until woken up next. But remember 2881 * the requested order/highest_zoneidx in case it was higher/tighter 2882 * than our current ones 2883 */ 2884 if (pgdat->kcompactd_max_order <= cc.order) 2885 pgdat->kcompactd_max_order = 0; 2886 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) 2887 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2888 } 2889 2890 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) 2891 { 2892 if (!order) 2893 return; 2894 2895 if (pgdat->kcompactd_max_order < order) 2896 pgdat->kcompactd_max_order = order; 2897 2898 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) 2899 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; 2900 2901 /* 2902 * Pairs with implicit barrier in wait_event_freezable() 2903 * such that wakeups are not missed. 2904 */ 2905 if (!wq_has_sleeper(&pgdat->kcompactd_wait)) 2906 return; 2907 2908 if (!kcompactd_node_suitable(pgdat)) 2909 return; 2910 2911 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, 2912 highest_zoneidx); 2913 wake_up_interruptible(&pgdat->kcompactd_wait); 2914 } 2915 2916 /* 2917 * The background compaction daemon, started as a kernel thread 2918 * from the init process. 
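It compacts a node on demand when woken via wakeup_kcompactd() and, when proactive compaction is enabled, periodically based on the node's fragmentation score.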
2919 */ 2920 static int kcompactd(void *p) 2921 { 2922 pg_data_t *pgdat = (pg_data_t *)p; 2923 struct task_struct *tsk = current; 2924 long default_timeout = msecs_to_jiffies(HPAGE_FRAG_CHECK_INTERVAL_MSEC); 2925 long timeout = default_timeout; 2926 2927 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 2928 2929 if (!cpumask_empty(cpumask)) 2930 set_cpus_allowed_ptr(tsk, cpumask); 2931 2932 set_freezable(); 2933 2934 pgdat->kcompactd_max_order = 0; 2935 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; 2936 2937 while (!kthread_should_stop()) { 2938 unsigned long pflags; 2939 2940 /* 2941 * Avoid the unnecessary wakeup for proactive compaction 2942 * when it is disabled. 2943 */ 2944 if (!sysctl_compaction_proactiveness) 2945 timeout = MAX_SCHEDULE_TIMEOUT; 2946 trace_mm_compaction_kcompactd_sleep(pgdat->node_id); 2947 if (wait_event_freezable_timeout(pgdat->kcompactd_wait, 2948 kcompactd_work_requested(pgdat), timeout) && 2949 !pgdat->proactive_compact_trigger) { 2950 2951 psi_memstall_enter(&pflags); 2952 kcompactd_do_work(pgdat); 2953 psi_memstall_leave(&pflags); 2954 /* 2955 * Reset the timeout value. The defer timeout from 2956 * proactive compaction is lost here, but that is fine 2957 * because, if the zone has changed substantially, 2958 * carrying on with the previous defer interval is 2959 * not useful. 2960 */ 2961 timeout = default_timeout; 2962 continue; 2963 } 2964 2965 /* 2966 * Start the proactive work with default timeout. Based 2967 * on the fragmentation score, this timeout is updated. 2968 */ 2969 timeout = default_timeout; 2970 if (should_proactive_compact_node(pgdat)) { 2971 unsigned int prev_score, score; 2972 2973 prev_score = fragmentation_score_node(pgdat); 2974 proactive_compact_node(pgdat); 2975 score = fragmentation_score_node(pgdat); 2976 /* 2977 * Defer proactive compaction if the fragmentation 2978 * score did not go down, i.e. no progress was made. 2979 */ 2980 if (unlikely(score >= prev_score)) 2981 timeout = 2982 default_timeout << COMPACT_MAX_DEFER_SHIFT; 2983 } 2984 if (unlikely(pgdat->proactive_compact_trigger)) 2985 pgdat->proactive_compact_trigger = false; 2986 } 2987 2988 return 0; 2989 } 2990 2991 /* 2992 * This kcompactd start function will be called by init and node-hot-add. 2993 * On node hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added. 2994 */ 2995 void kcompactd_run(int nid) 2996 { 2997 pg_data_t *pgdat = NODE_DATA(nid); 2998 2999 if (pgdat->kcompactd) 3000 return; 3001 3002 pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid); 3003 if (IS_ERR(pgdat->kcompactd)) { 3004 pr_err("Failed to start kcompactd on node %d\n", nid); 3005 pgdat->kcompactd = NULL; 3006 } 3007 } 3008 3009 /* 3010 * Called by memory hotplug when all memory in a node is offlined. Caller must 3011 * be holding mem_hotplug_begin/done(). 3012 */ 3013 void kcompactd_stop(int nid) 3014 { 3015 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; 3016 3017 if (kcompactd) { 3018 kthread_stop(kcompactd); 3019 NODE_DATA(nid)->kcompactd = NULL; 3020 } 3021 } 3022 3023 /* 3024 * It's optimal to keep kcompactd on the same CPUs as its node's memory, but 3025 * it is not required for correctness. So if the last cpu in a node goes 3026 * away, kcompactd may run anywhere; when the first one comes back, 3027 * restore its cpu binding.
3028 */ 3029 static int kcompactd_cpu_online(unsigned int cpu) 3030 { 3031 int nid; 3032 3033 for_each_node_state(nid, N_MEMORY) { 3034 pg_data_t *pgdat = NODE_DATA(nid); 3035 const struct cpumask *mask; 3036 3037 mask = cpumask_of_node(pgdat->node_id); 3038 3039 if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) 3040 /* One of our CPUs online: restore mask */ 3041 if (pgdat->kcompactd) 3042 set_cpus_allowed_ptr(pgdat->kcompactd, mask); 3043 } 3044 return 0; 3045 } 3046 3047 static int __init kcompactd_init(void) 3048 { 3049 int nid; 3050 int ret; 3051 3052 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 3053 "mm/compaction:online", 3054 kcompactd_cpu_online, NULL); 3055 if (ret < 0) { 3056 pr_err("kcompactd: failed to register hotplug callbacks.\n"); 3057 return ret; 3058 } 3059 3060 for_each_node_state(nid, N_MEMORY) 3061 kcompactd_run(nid); 3062 return 0; 3063 } 3064 subsys_initcall(kcompactd_init) 3065 3066 #endif /* CONFIG_COMPACTION */ 3067