/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

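/*
 * Illustration of the assumption above (hypothetical layout, with a
 * pageblock of 512 pages): if the block [0x10000, 0x10200) starts in node0
 * but node1 begins at pfn 0x10100, start_page and end_page carry different
 * zone ids and the block is rejected. A node0 node1 node0 pattern inside a
 * single pageblock would defeat this two-endpoint check, which is why such
 * interleaving is assumed not to occur.
 */
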
#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (cc->finished_update_migrate)
			return;
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (cc->finished_update_free)
			return;
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

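/*
 * The two helpers below implement compaction's locking protocol. A condensed
 * sketch of the pattern both scanners use (see isolate_freepages_block() and
 * isolate_migratepages_block() for the real thing):
 *
 *	for (; pfn < end_pfn; pfn++) {
 *		if (!(pfn % SWAP_CLUSTER_MAX)
 *		    && compact_unlock_should_abort(lock, flags, &locked, cc))
 *			break;	(fatal signal, or async + need_resched())
 *		...
 *		if (!locked) {
 *			locked = compact_trylock_irqsave(lock, &flags, cc);
 *			if (!locked)
 *				break;	(async compaction backed off)
 *			recheck state that was tested locklessly above
 *		}
 *		...
 *	}
 *	if (locked)
 *		spin_unlock_irqrestore(lock, flags);
 */
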
/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 * async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 * scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it is not worth checking the order across the
		 * whole valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

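/*
 * Note on the PageBuddy() test above: a free page of order >=
 * pageblock_order covers the entire pageblock (e.g. an order-9 buddy page
 * with the common pageblock_order of 9), so there is no room left in the
 * block to migrate pages into and it can be skipped outright.
 */
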
/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction we do
			 * not spin on the lock, and we acquire it as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

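/*
 * Worked example for the "advance to the end" logic above: splitting an
 * order-3 buddy page at pfn P makes split_free_page() return 8, so eight
 * order-0 pages are queued on the freelist, blockpfn/cursor jump by 7, and
 * the loop increment then moves the scan to P + 8, the first pfn not
 * covered by the page that was just split.
 */
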
/**
 * isolate_freepages_range() - isolate free pages.
 * @cc: Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn: The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n. (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

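/*
 * Example of the throttle above: with 600 inactive and 200 active LRU pages
 * in a zone, the zone counts as having too many isolated pages once more
 * than 400 pages sit isolated off the LRU lists; the caller below then
 * waits (or, for async compaction, aborts) until that count drops.
 */
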
/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc: Compaction control structure.
 * @low_pfn: The first PFN to isolate
 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to or
 * greater than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages;
		 * skip any other type of page.
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageTransHuge under lock */
			if (!PageLRU(page))
				continue;
			if (PageTransHuge(page)) {
				low_pfn += (1 << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try to isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

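/*
 * Note on batching: COMPACT_CLUSTER_MAX (defined as SWAP_CLUSTER_MAX, i.e.
 * 32 pages at the time of writing) caps how many pages
 * isolate_migratepages_block() gathers before they are handed to
 * migrate_pages(), bounding both LRU isolation time and the size of each
 * migration burst.
 */
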
/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc: Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn: The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */

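/*
 * Everything below implements compaction proper: the migration scanner
 * walks the zone from its start towards the end looking for pages to move,
 * while the free scanner (isolate_freepages() below) walks backwards from
 * the end of the zone looking for free pages to move them into. A
 * compaction run is complete when the two scanners meet.
 */
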
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
				isolate_start_pfn :
				block_start_pfn - pageblock_nr_pages;

		/*
		 * Set a flag that we successfully isolated in this pageblock.
		 * In the next loop iteration, zone->compact_cached_free_pfn
		 * will not be updated and thus it will effectively contain the
		 * highest pageblock we isolated pages from.
		 */
		if (isolated)
			cc->finished_update_free = true;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;

	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist. All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

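/*
 * The two callbacks above are the glue between compaction and the migration
 * core: compact_zone() passes them to migrate_pages(), roughly as
 *
 *	migrate_pages(&cc->migratepages, compaction_alloc, compaction_free,
 *			(unsigned long)cc, cc->mode, MR_COMPACTION);
 *
 * so each migrated page draws its destination from cc->freepages, and the
 * destination pages of failed migrations are returned there.
 */
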
/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

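/*
 * Worked example for compact_finished(): for an order-3 request the run
 * keeps going until the zone is above low_wmark_pages(zone) + 8 pages, and
 * even then it only reports COMPACT_PARTIAL once a free page of order >= 3
 * with the right migratetype actually sits on a free list.
 */
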
/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

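/*
 * Example of the fragindex decision above, using the default
 * sysctl_extfrag_threshold of 500 (set below): a fragmentation index of 650
 * for the requested order means failure is mostly due to external
 * fragmentation, so compaction proceeds; an index of 200 means the zone is
 * simply short of memory, so compaction is skipped in favour of reclaim;
 * -1000 means enough free memory exists and only the watermark check
 * matters.
 */
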
static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Use cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

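/*
 * try_to_compact_pages() below folds the per-zone results into a single
 * return code with rc = max(status, rc). This relies on the COMPACT_*
 * constants being ordered from least to most progress (COMPACT_DEFERRED
 * lowest, COMPACT_COMPLETE highest at the time of writing), so the most
 * successful zone determines the reported result.
 */
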
/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 * @candidate_zone: Return the zone where we think allocation should succeed
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			enum migrate_mode mode, int *contended,
			struct zone **candidate_zone)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int alloc_flags = 0;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

#ifdef CONFIG_CMA
	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
							&zone_contended);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags)) {
			*candidate_zone = zone;
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */