Lines Matching +full:scaled +full:- +full:sync
1 // SPDX-License-Identifier: GPL-2.0
9 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
17 #include <linux/backing-dev.h>
20 #include <linux/page-isolation.h>
46 * order == -1 is expected when compacting proactively via
53 return order == -1; in is_via_compact_memory()
71  * Page order with respect to which proactive compaction
80 #define COMPACTION_HPAGE_ORDER (PMD_SHIFT - PAGE_SHIFT)
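For reference (THP-enabled definition shown; values assume a typical x86-64 build with 4 KiB base pages and 2 MiB PMDs): PMD_SHIFT - PAGE_SHIFT = 21 - 12 = 9, so proactive compaction measures fragmentation against order-9 (2 MiB) allocations.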
102 list_del(&page->lru); in release_free_list()
128 zone->compact_considered = 0; in defer_compaction()
129 zone->compact_defer_shift++; in defer_compaction()
131 if (order < zone->compact_order_failed) in defer_compaction()
132 zone->compact_order_failed = order; in defer_compaction()
134 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
135 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
143 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
145 if (order < zone->compact_order_failed) in compaction_deferred()
149 if (++zone->compact_considered >= defer_limit) { in compaction_deferred()
150 zone->compact_considered = defer_limit; in compaction_deferred()
168 zone->compact_considered = 0; in compaction_defer_reset()
169 zone->compact_defer_shift = 0; in compaction_defer_reset()
171 if (order >= zone->compact_order_failed) in compaction_defer_reset()
172 zone->compact_order_failed = order + 1; in compaction_defer_reset()
180 if (order < zone->compact_order_failed) in compaction_restarting()
183 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
184 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
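The deferral helpers above implement an exponential back-off: every failure bumps compact_defer_shift (capped at COMPACT_MAX_DEFER_SHIFT), compaction_deferred() then swallows 1 << compact_defer_shift attempts before compaction is retried, and compaction_restarting() detects that the cap has been reached and the budget spent. A minimal userspace sketch of the resulting schedule, assuming the mainline value COMPACT_MAX_DEFER_SHIFT == 6 (verify against your tree):

#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* assumed mainline value */

int main(void)
{
	unsigned int defer_shift;

	/* defer_limit = 1UL << compact_defer_shift, as in compaction_deferred() */
	for (defer_shift = 0; defer_shift <= COMPACT_MAX_DEFER_SHIFT; defer_shift++)
		printf("defer_shift %u: skip the next %lu compaction attempts\n",
		       defer_shift, 1UL << defer_shift);
	return 0;
}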
191 if (cc->ignore_skip_hint) in isolation_suitable()
199 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
200 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
201 zone->compact_cached_free_pfn = in reset_cached_positions()
202 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
238 while (start_nr-- > 0) { in skip_offline_sections_reverse()
300 * non-movable pageblock as the starting point. in __reset_isolation_pfn()
308 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
316 block_pfn = pageblock_end_pfn(pfn) - 1; in __reset_isolation_pfn()
317 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); in __reset_isolation_pfn()
351 unsigned long migrate_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
352 unsigned long free_pfn = zone_end_pfn(zone) - 1; in __reset_isolation_suitable()
359 if (!zone->compact_blockskip_flush) in __reset_isolation_suitable()
362 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
371 free_pfn -= pageblock_nr_pages) { in __reset_isolation_suitable()
379 zone->compact_init_migrate_pfn = reset_migrate; in __reset_isolation_suitable()
380 zone->compact_cached_migrate_pfn[0] = reset_migrate; in __reset_isolation_suitable()
381 zone->compact_cached_migrate_pfn[1] = reset_migrate; in __reset_isolation_suitable()
389 zone->compact_init_free_pfn = reset_free; in __reset_isolation_suitable()
390 zone->compact_cached_free_pfn = reset_free; in __reset_isolation_suitable()
396 zone->compact_cached_migrate_pfn[0] = migrate_pfn; in __reset_isolation_suitable()
397 zone->compact_cached_migrate_pfn[1] = migrate_pfn; in __reset_isolation_suitable()
398 zone->compact_cached_free_pfn = free_pfn; in __reset_isolation_suitable()
407 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable()
424 if (cc->ignore_skip_hint) in test_and_set_skip()
428 if (!skip && !cc->no_set_skip_hint) in test_and_set_skip()
436 struct zone *zone = cc->zone; in update_cached_migrate()
439 if (cc->no_set_skip_hint) in update_cached_migrate()
444 /* Update where async and sync compaction should restart */ in update_cached_migrate()
445 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_cached_migrate()
446 zone->compact_cached_migrate_pfn[0] = pfn; in update_cached_migrate()
447 if (cc->mode != MIGRATE_ASYNC && in update_cached_migrate()
448 pfn > zone->compact_cached_migrate_pfn[1]) in update_cached_migrate()
449 zone->compact_cached_migrate_pfn[1] = pfn; in update_cached_migrate()
459 struct zone *zone = cc->zone; in update_pageblock_skip()
461 if (cc->no_set_skip_hint) in update_pageblock_skip()
466 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
467 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
501 * Sync compaction acquires the lock.
510 if (cc->mode == MIGRATE_ASYNC && !cc->contended) { in compact_lock_irqsave()
514 cc->contended = true; in compact_lock_irqsave()
542 cc->contended = true; in compact_unlock_should_abort()
553 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
586 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
603 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
604 page += (1UL << order) - 1; in isolate_freepages_block()
605 nr_scanned += (1UL << order) - 1; in isolate_freepages_block()
616 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
624 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
631 nr_scanned += isolated - 1; in isolate_freepages_block()
633 cc->nr_freepages += isolated; in isolate_freepages_block()
634 list_add_tail(&page->lru, &freelist[order]); in isolate_freepages_block()
636 if (!strict && cc->nr_migratepages <= cc->nr_freepages) { in isolate_freepages_block()
641 blockpfn += isolated - 1; in isolate_freepages_block()
642 page += isolated - 1; in isolate_freepages_block()
652 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
674 cc->total_free_scanned += nr_scanned; in isolate_freepages_block()
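Worked example for the skip arithmetic in this scanner (illustrative): on hitting an order-3 free or compound page, blockpfn += (1UL << order) - 1 advances 7 PFNs and the for-loop increment supplies the final +1, so the whole 8-page block is consumed in one iteration; the isolated - 1 adjustment after a successful isolation follows the same pattern.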
681 * isolate_freepages_range() - isolate free pages.
684 * @end_pfn: The one-past-last PFN.
686 * Non-free pages, invalid PFNs, or zone boundaries within the
688 * undo its actions and return zero. cc->freepages[] are empty.
690 * Otherwise, function returns one-past-the-last PFN of isolated page
692 * a free page). cc->freepages[] contain free pages isolated.
702 INIT_LIST_HEAD(&cc->freepages[order]); in isolate_freepages_range()
706 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
707 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
729 block_end_pfn, cc->zone)) in isolate_freepages_range()
733 block_end_pfn, cc->freepages, 0, true); in isolate_freepages_range()
738 * non-free pages). in isolate_freepages_range()
745 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
752 release_free_list(cc->freepages); in isolate_freepages_range()
763 pg_data_t *pgdat = cc->zone->zone_pgdat; in too_many_isolated()
781 if (cc->gfp_mask & __GFP_FS) { in too_many_isolated()
794 * skip_isolation_on_order() - determine when to skip folio isolation based on
796 * @order: to-be-isolated folio order
820 * isolate_migratepages_block() - isolate all migrate-able pages within
824 * @end_pfn: The one-past-the-last PFN to isolate, within same pageblock
829  * Returns an errno, like -EAGAIN or -EINTR in case of e.g. a pending signal or congestion,
830 * -ENOMEM in case we could not allocate a page, or 0.
831 * cc->migrate_pfn will contain the next pfn to scan.
833 * The pages are isolated on cc->migratepages list (not required to be empty),
834 * and cc->nr_migratepages is updated accordingly.
840 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
854 cc->migrate_pfn = low_pfn; in isolate_migratepages_block()
863 if (cc->nr_migratepages) in isolate_migratepages_block()
864 return -EAGAIN; in isolate_migratepages_block()
867 if (cc->mode == MIGRATE_ASYNC) in isolate_migratepages_block()
868 return -EAGAIN; in isolate_migratepages_block()
873 return -EINTR; in isolate_migratepages_block()
878 if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) { in isolate_migratepages_block()
880 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
890 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
898 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
903 * a compound or a high-order buddy page in the in isolate_migratepages_block()
906 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
921 cc->contended = true; in isolate_migratepages_block()
922 ret = -EINTR; in isolate_migratepages_block()
941 low_pfn == cc->zone->zone_start_pfn)) { in isolate_migratepages_block()
957 if (!cc->alloc_contig) { in isolate_migratepages_block()
960 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
961 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
972 ret = isolate_or_dissolve_huge_folio(folio, &cc->migratepages); in isolate_migratepages_block()
976 * reports an error. In case of -ENOMEM, abort right away. in isolate_migratepages_block()
979 /* Do not report -EBUSY down the chain */ in isolate_migratepages_block()
980 if (ret == -EBUSY) in isolate_migratepages_block()
982 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
983 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
990 * on the cc->migratepages list. in isolate_migratepages_block()
992 low_pfn += folio_nr_pages(folio) - folio_page_idx(folio, page) - 1; in isolate_migratepages_block()
998 * Buddy and cannot be re-allocated because they are in isolate_migratepages_block()
999 * isolated. Fall-through as the check below handles in isolate_migratepages_block()
1019 low_pfn += (1UL << freepage_order) - 1; in isolate_migratepages_block()
1020 nr_scanned += (1UL << freepage_order) - 1; in isolate_migratepages_block()
1033 if (PageCompound(page) && !cc->alloc_contig) { in isolate_migratepages_block()
1037 if (skip_isolation_on_order(order, cc->order)) { in isolate_migratepages_block()
1039 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1040 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1048 * It's possible to migrate LRU and non-lru movable pages. in isolate_migratepages_block()
1071 * sure the page is not being freed elsewhere -- the in isolate_migratepages_block()
1084 if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) in isolate_migratepages_block()
1091 if (!(cc->gfp_mask & __GFP_FS) && mapping) in isolate_migratepages_block()
1107 * it will be able to migrate without blocking - clean pages in isolate_migratepages_block()
1122 * a ->migrate_folio callback are possible to migrate in isolate_migratepages_block()
1144 mapping->a_ops->migrate_folio; in isolate_migratepages_block()
1163 compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); in isolate_migratepages_block()
1176 !cc->finish_pageblock) { in isolate_migratepages_block()
1186 cc->order) && in isolate_migratepages_block()
1187 !cc->alloc_contig)) { in isolate_migratepages_block()
1188 low_pfn += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1189 nr_scanned += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1197 low_pfn += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1206 list_add(&folio->lru, &cc->migratepages); in isolate_migratepages_block()
1208 cc->nr_migratepages += folio_nr_pages(folio); in isolate_migratepages_block()
1210 nr_scanned += folio_nr_pages(folio) - 1; in isolate_migratepages_block()
1218 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX && in isolate_migratepages_block()
1219 !cc->finish_pageblock && !cc->contended) { in isolate_migratepages_block()
1235 if (!skip_on_failure && ret != -ENOMEM) in isolate_migratepages_block()
1240 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1248 putback_movable_pages(&cc->migratepages); in isolate_migratepages_block()
1249 cc->nr_migratepages = 0; in isolate_migratepages_block()
1254 low_pfn = next_skip_pfn - 1; in isolate_migratepages_block()
1259 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
1262 if (ret == -ENOMEM) in isolate_migratepages_block()
1291 if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { in isolate_migratepages_block()
1292 if (!cc->no_set_skip_hint && valid_page && !skip_updated) in isolate_migratepages_block()
1301 cc->total_migrate_scanned += nr_scanned; in isolate_migratepages_block()
1305 cc->migrate_pfn = low_pfn; in isolate_migratepages_block()
1311 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
1314 * @end_pfn: The one-past-last PFN.
1316  * Returns -EAGAIN when contended, -EINTR in case of a pending signal, -ENOMEM
1329 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1330 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1340 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1349 if (cc->nr_migratepages >= COMPACT_CLUSTER_MAX) in isolate_migratepages_range()
1367 if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) in suitable_migration_source()
1372 if (cc->migratetype == MIGRATE_MOVABLE) in suitable_migration_source()
1375 return block_mt == cc->migratetype; in suitable_migration_source()
1384 int order = cc->order > 0 ? cc->order : pageblock_order; in suitable_migration_target()
1387 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1395 if (cc->ignore_block_suitable) in suitable_migration_target()
1409 unsigned short shift = BITS_PER_LONG - 1; in freelist_scan_limit()
1411 return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1; in freelist_scan_limit()
1420 return (cc->free_pfn >> pageblock_order) in compact_scanners_met()
1421 <= (cc->migrate_pfn >> pageblock_order); in compact_scanners_met()
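compact_scanners_met() above simply reports when the free scanner's pageblock has dropped to or below the migrate scanner's. The fast-search limit, in turn, shrinks as fast searches keep failing; a small userspace sketch assuming COMPACT_CLUSTER_MAX == 32 (the mainline, SWAP_CLUSTER_MAX-derived value):

#include <stdio.h>

#define COMPACT_CLUSTER_MAX 32UL	/* assumed mainline value */

static unsigned int scan_limit(unsigned int fast_search_fail)
{
	unsigned int shift = fast_search_fail < 63 ? fast_search_fail : 63;

	return (COMPACT_CLUSTER_MAX >> shift) + 1;	/* as in freelist_scan_limit() */
}

int main(void)
{
	unsigned int fail;

	for (fail = 0; fail <= 6; fail++)	/* prints 33 17 9 5 3 2 1 */
		printf("fast_search_fail %u -> scan at most %u entries\n",
		       fail, scan_limit(fail));
	return 0;
}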
1434 if (!list_is_first(&freepage->buddy_list, freelist)) { in move_freelist_head()
1435 list_cut_before(&sublist, freelist, &freepage->buddy_list); in move_freelist_head()
1451 if (!list_is_last(&freepage->buddy_list, freelist)) { in move_freelist_tail()
1452 list_cut_position(&sublist, freelist, &freepage->buddy_list); in move_freelist_tail()
1464 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_around()
1468 if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC) in fast_isolate_around()
1472 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1473 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1475 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1479 isolate_freepages_block(cc, &start_pfn, end_pfn, cc->freepages, 1, false); in fast_isolate_around()
1482 if (start_pfn == end_pfn && !cc->no_set_skip_hint) in fast_isolate_around()
1486 /* Search orders in round-robin fashion */
1489 order--; in next_search_order()
1491 order = cc->order - 1; in next_search_order()
1494 if (order == cc->search_order) { in next_search_order()
1495 cc->search_order--; in next_search_order()
1496 if (cc->search_order < 0) in next_search_order()
1497 cc->search_order = cc->order - 1; in next_search_order()
1498 return -1; in next_search_order()
1516 if (cc->order <= 0) in fast_isolate_freepages()
1523 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1532 distance = (cc->free_pfn - cc->migrate_pfn); in fast_isolate_freepages()
1533 low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2)); in fast_isolate_freepages()
1534 min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1)); in fast_isolate_freepages()
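Worked example with made-up PFNs for the search window just computed: if cc->migrate_pfn is 0x10000 and cc->free_pfn is 0x50000, the scanners are 0x40000 pages apart, so the preferred floor low_pfn is about 0x40000 (a quarter of the gap below the free scanner) and the hard floor min_pfn about 0x30000 (half the gap), each rounded down to a pageblock boundary.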
1543 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1545 for (order = cc->search_order; in fast_isolate_freepages()
1548 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1555 if (!area->nr_free) in fast_isolate_freepages()
1558 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1559 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_isolate_freepages()
1569 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1572 cc->fast_search_fail = 0; in fast_isolate_freepages()
1573 cc->search_order = order; in fast_isolate_freepages()
1605 nr_scanned += nr_isolated - 1; in fast_isolate_freepages()
1607 cc->nr_freepages += nr_isolated; in fast_isolate_freepages()
1608 list_add_tail(&page->lru, &cc->freepages[order]); in fast_isolate_freepages()
1612 order = cc->search_order + 1; in fast_isolate_freepages()
1617 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1620 if (cc->nr_freepages >= cc->nr_migratepages) in fast_isolate_freepages()
1631 trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, in fast_isolate_freepages()
1635 cc->fast_search_fail++; in fast_isolate_freepages()
1644 cc->free_pfn = highest; in fast_isolate_freepages()
1646 if (cc->direct_compaction && pfn_valid(min_pfn)) { in fast_isolate_freepages()
1649 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1650 cc->zone); in fast_isolate_freepages()
1654 cc->free_pfn = min_pfn; in fast_isolate_freepages()
1660 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1661 highest -= pageblock_nr_pages; in fast_isolate_freepages()
1662 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1665 cc->total_free_scanned += nr_scanned; in fast_isolate_freepages()
1679 struct zone *zone = cc->zone; in isolate_freepages()
1689 if (cc->nr_freepages) in isolate_freepages()
1694 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1697 * block_start_pfn -= pageblock_nr_pages in the for loop. in isolate_freepages()
1703 isolate_start_pfn = cc->free_pfn; in isolate_freepages()
1707 low_pfn = pageblock_end_pfn(cc->migrate_pfn); in isolate_freepages()
1708 stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1; in isolate_freepages()
1712 * pages on cc->migratepages. We stop searching if the migrate in isolate_freepages()
1717 block_start_pfn -= pageblock_nr_pages, in isolate_freepages()
1750 block_end_pfn, cc->freepages, stride, false); in isolate_freepages()
1754 update_pageblock_skip(cc, page, block_start_pfn - in isolate_freepages()
1758 if (cc->nr_freepages >= cc->nr_migratepages) { in isolate_freepages()
1765 block_start_pfn - pageblock_nr_pages; in isolate_freepages()
1790 cc->free_pfn = isolate_start_pfn; in isolate_freepages()
1794 * This is a migrate-callback that "allocates" freepages by taking pages
1809 if (!list_empty(&cc->freepages[start_order])) in compaction_alloc_noprof()
1821 freepage = list_first_entry(&cc->freepages[start_order], struct page, in compaction_alloc_noprof()
1825 list_del(&freepage->lru); in compaction_alloc_noprof()
1828 start_order--; in compaction_alloc_noprof()
1831 list_add(&freepage[size].lru, &cc->freepages[start_order]); in compaction_alloc_noprof()
1836 post_alloc_hook(&dst->page, order, __GFP_MOVABLE); in compaction_alloc_noprof()
1837 set_page_refcounted(&dst->page); in compaction_alloc_noprof()
1839 prep_compound_page(&dst->page, order); in compaction_alloc_noprof()
1840 cc->nr_freepages -= 1 << order; in compaction_alloc_noprof()
1841 cc->nr_migratepages -= 1 << order; in compaction_alloc_noprof()
1842 return page_rmappable_folio(&dst->page); in compaction_alloc_noprof()
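A minimal userspace sketch (illustrative only, not the kernel implementation) of the splitting loop above: compaction_alloc_noprof() grabs the smallest isolated chunk whose order is at least the requested one, halves it repeatedly, and hands the unused upper halves back to the per-order cc->freepages lists.

#include <stdio.h>

#define NR_ORDERS 11	/* stand-in for NR_PAGE_ORDERS; illustrative */

/* nr_free[o] stands in for the length of cc->freepages[o]; pfn values are made up. */
static void split_chunk(unsigned long pfn, int start_order, int order,
			unsigned long nr_free[NR_ORDERS])
{
	while (start_order > order) {
		unsigned long half;

		start_order--;
		half = 1UL << start_order;
		/* The upper half goes back on the lower-order free list... */
		nr_free[start_order]++;
		printf("freed pfn %lu as an order-%d chunk\n", pfn + half, start_order);
		/* ...while the lower half (still at pfn) keeps being split. */
	}
	printf("migrating into order-%d chunk at pfn %lu\n", order, pfn);
}

int main(void)
{
	unsigned long nr_free[NR_ORDERS] = { 0 };

	/* Satisfy an order-0 request from an order-3 chunk starting at pfn 4096. */
	split_chunk(4096, 3, 0, nr_free);
	return 0;
}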
1851 * This is a migrate-callback that "frees" freepages back to the isolated
1859 struct page *page = &dst->page; in compaction_free()
1863 list_add(&dst->lru, &cc->freepages[order]); in compaction_free()
1864 cc->nr_freepages += 1 << order; in compaction_free()
1866 cc->nr_migratepages += 1 << order; in compaction_free()
1897 if (cc->fast_start_pfn == ULONG_MAX) in update_fast_start_pfn()
1900 if (!cc->fast_start_pfn) in update_fast_start_pfn()
1901 cc->fast_start_pfn = pfn; in update_fast_start_pfn()
1903 cc->fast_start_pfn = min(cc->fast_start_pfn, pfn); in update_fast_start_pfn()
1909 if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX) in reinit_migrate_pfn()
1910 return cc->migrate_pfn; in reinit_migrate_pfn()
1912 cc->migrate_pfn = cc->fast_start_pfn; in reinit_migrate_pfn()
1913 cc->fast_start_pfn = ULONG_MAX; in reinit_migrate_pfn()
1915 return cc->migrate_pfn; in reinit_migrate_pfn()
1928 unsigned long pfn = cc->migrate_pfn; in fast_find_migrateblock()
1934 if (cc->ignore_skip_hint) in fast_find_migrateblock()
1941 if (cc->finish_pageblock) in fast_find_migrateblock()
1949 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
1957 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1966 if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE) in fast_find_migrateblock()
1975 distance = (cc->free_pfn - cc->migrate_pfn) >> 1; in fast_find_migrateblock()
1976 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
1978 high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance); in fast_find_migrateblock()
1980 for (order = cc->order - 1; in fast_find_migrateblock()
1982 order--) { in fast_find_migrateblock()
1983 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
1988 if (!area->nr_free) in fast_find_migrateblock()
1991 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
1992 freelist = &area->free_list[MIGRATE_MOVABLE]; in fast_find_migrateblock()
2017 if (pfn < cc->zone->zone_start_pfn) in fast_find_migrateblock()
2018 pfn = cc->zone->zone_start_pfn; in fast_find_migrateblock()
2019 cc->fast_search_fail = 0; in fast_find_migrateblock()
2024 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
2027 cc->total_migrate_scanned += nr_scanned; in fast_find_migrateblock()
2034 cc->fast_search_fail++; in fast_find_migrateblock()
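Reading of the window set up earlier in this function (with illustrative numbers): distance = (free_pfn - migrate_pfn) >> 1 caps the fast search at roughly the halfway point between the two scanners, e.g. high_pfn of about migrate_pfn + 0x20000 when they are 0x40000 pages apart; the zone_start_pfn check narrows the window further once the migrate scanner has advanced (that narrowing is on a line elided from this excerpt), keeping the fast search well clear of the free scanner.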
2053 (cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0); in isolate_migratepages()
2063 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
2064 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
2071 fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail; in isolate_migratepages()
2080 for (; block_end_pfn <= cc->free_pfn; in isolate_migratepages()
2082 cc->migrate_pfn = low_pfn = block_end_pfn, in isolate_migratepages()
2095 block_end_pfn, cc->zone); in isolate_migratepages()
2101 block_end_pfn = min(next_pfn, cc->free_pfn); in isolate_migratepages()
2113 low_pfn == cc->zone->zone_start_pfn) && in isolate_migratepages()
2143 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; in isolate_migratepages()
2149 * pgdat_kswapd_lock() pins pgdat->kswapd, so a concurrent kswapd_stop() can't
2157 running = pgdat->kswapd && task_is_running(pgdat->kswapd); in kswapd_is_running()
2174  * wrt the COMPACTION_HPAGE_ORDER, scaled by the zone's size. It
2186 score = zone->present_pages * fragmentation_score_zone(zone); in fragmentation_score_zone_weighted()
2187 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); in fragmentation_score_zone_weighted()
2191 * The per-node proactive (background) compaction process is started by its
2194 * the node's score falls below the low threshold, or one of the back-off
2205 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
2218 wmark_low = 100U - sysctl_compaction_proactiveness; in fragmentation_score_wmark()
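A small sketch (illustrative, not kernel code) of how the proactive-compaction thresholds and the weighted zone score above combine; it assumes the default sysctl_compaction_proactiveness of 20 and the +10 low/high gap used by mainline:

#include <stdio.h>

int main(void)
{
	unsigned int proactiveness = 20;		/* default sysctl value (assumed) */
	unsigned int wmark_low = 100U - proactiveness;	/* compaction stops below this: 80 */
	unsigned int wmark_high = wmark_low + 10U;	/* kcompactd wakes above this: 90 */

	if (wmark_high > 100U)
		wmark_high = 100U;

	/* Hypothetical zone covering a quarter of its node, zone score 60/100. */
	unsigned long zone_pages = 262144, node_pages = 1048576, zone_score = 60;
	unsigned long weighted = zone_pages * zone_score / (node_pages + 1);

	/* The zone contributes roughly its score scaled by its share of the node. */
	printf("weighted zone score %lu, node thresholds low=%u high=%u\n",
	       weighted, wmark_low, wmark_high);
	return 0;
}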
2237 const int migratetype = cc->migratetype; in __compact_finished()
2243 reset_cached_positions(cc->zone); in __compact_finished()
2251 if (cc->direct_compaction) in __compact_finished()
2252 cc->zone->compact_blockskip_flush = true; in __compact_finished()
2254 if (cc->whole_zone) in __compact_finished()
2260 if (cc->proactive_compaction) { in __compact_finished()
2264 pgdat = cc->zone->zone_pgdat; in __compact_finished()
2268 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2279 if (is_via_compact_memory(cc->order)) in __compact_finished()
2288 if (!pageblock_aligned(cc->migrate_pfn)) in __compact_finished()
2296 if (defrag_mode && !cc->direct_compaction) { in __compact_finished()
2297 if (__zone_watermark_ok(cc->zone, cc->order, in __compact_finished()
2298 high_wmark_pages(cc->zone), in __compact_finished()
2299 cc->highest_zoneidx, cc->alloc_flags, in __compact_finished()
2300 zone_page_state(cc->zone, in __compact_finished()
2309 for (order = cc->order; order < NR_PAGE_ORDERS; order++) { in __compact_finished()
2310 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2329 * stealing for a non-movable allocation, make sure in __compact_finished()
2339 if (cc->contended || fatal_signal_pending(current)) in __compact_finished()
2350 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2362 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
2379 watermark += low_wmark_pages(zone) - min_wmark_pages(zone); in __compaction_suitable()
2399 * index of -1000 would imply allocations might succeed depending on in compaction_suitable()
2400 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
2405 * ignore fragindex for non-costly orders where the alternative to in compaction_suitable()
2442 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in compaction_zonelist_suitable()
2443 ac->highest_zoneidx, ac->nodemask) { in compaction_zonelist_suitable()
2455 ac->highest_zoneidx, available)) in compaction_zonelist_suitable()
2489 * free memory in the non-CMA pageblocks. Otherwise compaction could form in compaction_suit_allocation_order()
2490 * the high-order page in CMA pageblocks, which would not help the in compaction_suit_allocation_order()
2493 * possibility that compaction would migrate pages from non-CMA to CMA in compaction_suit_allocation_order()
2514 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2515 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2517 const bool sync = cc->mode != MIGRATE_ASYNC; in compact_zone() local
2526 cc->total_migrate_scanned = 0; in compact_zone()
2527 cc->total_free_scanned = 0; in compact_zone()
2528 cc->nr_migratepages = 0; in compact_zone()
2529 cc->nr_freepages = 0; in compact_zone()
2531 INIT_LIST_HEAD(&cc->freepages[order]); in compact_zone()
2532 INIT_LIST_HEAD(&cc->migratepages); in compact_zone()
2534 cc->migratetype = gfp_migratetype(cc->gfp_mask); in compact_zone()
2536 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2537 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2538 cc->highest_zoneidx, in compact_zone()
2539 cc->alloc_flags, in compact_zone()
2540 cc->mode == MIGRATE_ASYNC, in compact_zone()
2541 !cc->direct_compaction); in compact_zone()
2550 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2551 __reset_isolation_suitable(cc->zone); in compact_zone()
2559 cc->fast_start_pfn = 0; in compact_zone()
2560 if (cc->whole_zone) { in compact_zone()
2561 cc->migrate_pfn = start_pfn; in compact_zone()
2562 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2564 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2565 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2566 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) { in compact_zone()
2567 cc->free_pfn = pageblock_start_pfn(end_pfn - 1); in compact_zone()
2568 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2570 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) { in compact_zone()
2571 cc->migrate_pfn = start_pfn; in compact_zone()
2572 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2573 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2576 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2577 cc->whole_zone = true; in compact_zone()
2583 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on in compact_zone()
2586 * no isolation candidates, then the sync state does not matter. in compact_zone()
2588 * cached PFNs in sync to avoid revisiting the same blocks. in compact_zone()
2590 update_cached = !sync && in compact_zone()
2591 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2593 trace_mm_compaction_begin(cc, start_pfn, end_pfn, sync); in compact_zone()
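In short, the setup above: the migrate scanner starts at the zone's first PFN and walks upward while the free scanner starts at the last pageblock and walks downward; cached positions from earlier runs are reused unless cc->whole_zone is set or they have drifted outside the zone, and the run ends once the two scanners meet (compact_scanners_met() earlier in this file).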
2600 unsigned long iteration_start_pfn = cc->migrate_pfn; in compact_zone()
2610 cc->finish_pageblock = false; in compact_zone()
2613 cc->finish_pageblock = true; in compact_zone()
2620 putback_movable_pages(&cc->migratepages); in compact_zone()
2621 cc->nr_migratepages = 0; in compact_zone()
2625 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2626 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2632 * previous cc->order aligned block. in compact_zone()
2637 last_migrated_pfn = max(cc->zone->zone_start_pfn, in compact_zone()
2638 pageblock_start_pfn(cc->migrate_pfn - 1)); in compact_zone()
2643 * compaction_alloc/free() will update cc->nr_migratepages in compact_zone()
2646 nr_migratepages = cc->nr_migratepages; in compact_zone()
2647 err = migrate_pages(&cc->migratepages, compaction_alloc, in compact_zone()
2648 compaction_free, (unsigned long)cc, cc->mode, in compact_zone()
2654 cc->nr_migratepages = 0; in compact_zone()
2656 putback_movable_pages(&cc->migratepages); in compact_zone()
2658 * migrate_pages() may return -ENOMEM when scanners meet in compact_zone()
2661 if (err == -ENOMEM && !compact_scanners_met(cc)) { in compact_zone()
2667 * within the pageblock_order-aligned block and in compact_zone()
2676 if (!pageblock_aligned(cc->migrate_pfn) && in compact_zone()
2677 !cc->ignore_skip_hint && !cc->finish_pageblock && in compact_zone()
2678 (cc->mode < MIGRATE_SYNC)) { in compact_zone()
2679 cc->finish_pageblock = true; in compact_zone()
2686 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2694 if (capc && capc->page) { in compact_zone()
2702 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2707 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2709 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2712 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2724 if (cc->nr_freepages > 0) { in compact_zone()
2725 unsigned long free_pfn = release_free_list(cc->freepages); in compact_zone()
2727 cc->nr_freepages = 0; in compact_zone()
2735 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2736 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
2739 count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned); in compact_zone()
2740 count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned); in compact_zone()
2742 trace_mm_compaction_end(cc, start_pfn, end_pfn, sync, ret); in compact_zone()
2744 VM_BUG_ON(!list_empty(&cc->migratepages)); in compact_zone()
2780 WRITE_ONCE(current->capture_control, &capc); in compact_zone_order()
2789 WRITE_ONCE(current->capture_control, NULL); in compact_zone_order()
2804 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2828 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in try_to_compact_pages()
2829 ac->highest_zoneidx, ac->nodemask) { in try_to_compact_pages()
2844 alloc_flags, ac->highest_zoneidx, capture); in try_to_compact_pages()
2883 * compact_node() - compact all zones within a node
2890  * reaching score targets due to various back-off conditions, such as
2891 * contention on per-node or per-zone locks.
2898 .order = -1, in compact_node()
2907 zone = &pgdat->node_zones[zoneid]; in compact_node()
2912 return -EINTR; in compact_node()
2959 if (pgdat->proactive_compact_trigger) in compaction_proactiveness_sysctl_handler()
2962 pgdat->proactive_compact_trigger = true; in compaction_proactiveness_sysctl_handler()
2963 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, -1, in compaction_proactiveness_sysctl_handler()
2964 pgdat->nr_zones - 1); in compaction_proactiveness_sysctl_handler()
2965 wake_up_interruptible(&pgdat->kcompactd_wait); in compaction_proactiveness_sysctl_handler()
2986 return -EINVAL; in sysctl_compaction_handler()
2999 int nid = dev->id; in compact_store()
3014 return device_create_file(&node->dev, &dev_attr_compact); in compaction_register_node()
3019 device_remove_file(&node->dev, &dev_attr_compact); in compaction_unregister_node()
3025 return pgdat->kcompactd_max_order > 0 || kthread_should_stop() || in kcompactd_work_requested()
3026 pgdat->proactive_compact_trigger; in kcompactd_work_requested()
3033 enum zone_type highest_zoneidx = pgdat->kcompactd_highest_zoneidx; in kcompactd_node_suitable()
3039 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
3045 pgdat->kcompactd_max_order, in kcompactd_node_suitable()
3064 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
3065 .search_order = pgdat->kcompactd_max_order, in kcompactd_do_work()
3066 .highest_zoneidx = pgdat->kcompactd_highest_zoneidx, in kcompactd_do_work()
3074 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
3081 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
3112 * We use sync migration mode here, so we defer like in kcompactd_do_work()
3113 * sync direct compaction does. in kcompactd_do_work()
3129 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3130 pgdat->kcompactd_max_order = 0; in kcompactd_do_work()
3131 if (pgdat->kcompactd_highest_zoneidx >= cc.highest_zoneidx) in kcompactd_do_work()
3132 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; in kcompactd_do_work()
3140 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
3141 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
3143 if (pgdat->kcompactd_highest_zoneidx > highest_zoneidx) in wakeup_kcompactd()
3144 pgdat->kcompactd_highest_zoneidx = highest_zoneidx; in wakeup_kcompactd()
3150 if (!wq_has_sleeper(&pgdat->kcompactd_wait)) in wakeup_kcompactd()
3156 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
3158 wake_up_interruptible(&pgdat->kcompactd_wait); in wakeup_kcompactd()
3171 current->flags |= PF_KCOMPACTD; in kcompactd()
3174 pgdat->kcompactd_max_order = 0; in kcompactd()
3175 pgdat->kcompactd_highest_zoneidx = pgdat->nr_zones - 1; in kcompactd()
3186 trace_mm_compaction_kcompactd_sleep(pgdat->node_id); in kcompactd()
3187 if (wait_event_freezable_timeout(pgdat->kcompactd_wait, in kcompactd()
3189 !pgdat->proactive_compact_trigger) { in kcompactd()
3224 if (unlikely(pgdat->proactive_compact_trigger)) in kcompactd()
3225 pgdat->proactive_compact_trigger = false; in kcompactd()
3228 current->flags &= ~PF_KCOMPACTD; in kcompactd()
3234 * This kcompactd start function will be called by init and node-hot-add.
3235  * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
3241 if (pgdat->kcompactd) in kcompactd_run()
3244 pgdat->kcompactd = kthread_create_on_node(kcompactd, pgdat, nid, "kcompactd%d", nid); in kcompactd_run()
3245 if (IS_ERR(pgdat->kcompactd)) { in kcompactd_run()
3247 pgdat->kcompactd = NULL; in kcompactd_run()
3249 wake_up_process(pgdat->kcompactd); in kcompactd_run()
3259 struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; in kcompactd_stop()
3263 NODE_DATA(nid)->kcompactd = NULL; in kcompactd_stop()
3275 old = *(int *)table->data; in proc_dointvec_minmax_warn_RT_change()
3279 if (old != *(int *)table->data) in proc_dointvec_minmax_warn_RT_change()
3281 table->procname, current->comm, in proc_dointvec_minmax_warn_RT_change()