Lines matching full:zone in mm/compaction.c
73 * the "fragmentation score" of a node/zone.
159 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
161 zone->compact_considered = 0; in defer_compaction()
162 zone->compact_defer_shift++; in defer_compaction()
164 if (order < zone->compact_order_failed) in defer_compaction()
165 zone->compact_order_failed = order; in defer_compaction()
167 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT) in defer_compaction()
168 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT; in defer_compaction()
170 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
174 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
176 unsigned long defer_limit = 1UL << zone->compact_defer_shift; in compaction_deferred()
178 if (order < zone->compact_order_failed) in compaction_deferred()
182 if (++zone->compact_considered >= defer_limit) { in compaction_deferred()
183 zone->compact_considered = defer_limit; in compaction_deferred()
187 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
197 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
201 zone->compact_considered = 0; in compaction_defer_reset()
202 zone->compact_defer_shift = 0; in compaction_defer_reset()
204 if (order >= zone->compact_order_failed) in compaction_defer_reset()
205 zone->compact_order_failed = order + 1; in compaction_defer_reset()
207 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
211 static bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
213 if (order < zone->compact_order_failed) in compaction_restarting()
216 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT && in compaction_restarting()
217 zone->compact_considered >= 1UL << zone->compact_defer_shift; in compaction_restarting()
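The four functions matched above (defer_compaction, compaction_deferred, compaction_defer_reset, compaction_restarting) form an exponential-backoff state machine: each failed attempt doubles the number of requests that will be skipped, capped at 1 << COMPACT_MAX_DEFER_SHIFT (64). A minimal user-space sketch of that machine, with struct zone reduced to the three bookkeeping fields the kernel uses and the tracepoints dropped:

    #include <stdbool.h>
    #include <stdio.h>

    #define COMPACT_MAX_DEFER_SHIFT 6

    /* Reduced to the deferral bookkeeping fields of the kernel's struct zone. */
    struct zone {
            unsigned int compact_considered;
            unsigned int compact_defer_shift;
            int compact_order_failed;
    };

    /* Compaction failed at this order: double the backoff window. */
    static void defer_compaction(struct zone *zone, int order)
    {
            zone->compact_considered = 0;
            zone->compact_defer_shift++;

            if (order < zone->compact_order_failed)
                    zone->compact_order_failed = order;

            if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                    zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
    }

    /* Skip this attempt? Counts attempts against the 1 << defer_shift window. */
    static bool compaction_deferred(struct zone *zone, int order)
    {
            unsigned long defer_limit = 1UL << zone->compact_defer_shift;

            /* Orders below the last failing order are still worth trying. */
            if (order < zone->compact_order_failed)
                    return false;

            if (++zone->compact_considered >= defer_limit) {
                    zone->compact_considered = defer_limit;
                    return false;
            }
            return true;
    }

    int main(void)
    {
            struct zone z = { .compact_order_failed = 10 };

            defer_compaction(&z, 9);        /* defer_shift = 1, order_failed = 9 */
            defer_compaction(&z, 9);        /* defer_shift = 2: window is now 4 */

            /* Three attempts are skipped; the fourth is allowed through. */
            for (int i = 0; i < 4; i++)
                    printf("attempt %d deferred: %d\n", i,
                           compaction_deferred(&z, 9));
            return 0;
    }

compaction_restarting() (lines 211-217 above) then detects the saturated end state, defer_shift at the cap with the window fully consumed, and compact_zone() responds by calling __reset_isolation_suitable().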
230 static void reset_cached_positions(struct zone *zone) in reset_cached_positions() argument
232 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; in reset_cached_positions()
233 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; in reset_cached_positions()
234 zone->compact_cached_free_pfn = in reset_cached_positions()
235 pageblock_start_pfn(zone_end_pfn(zone) - 1); in reset_cached_positions()
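A worked example of these reset positions: the migrate scanner restarts at the first PFN of the zone, the free scanner at the start of the zone's last pageblock. The sketch below assumes the common pageblock_order of 9 (512 pages, i.e. 2 MiB with 4 KiB pages); pageblock_start_pfn() rounds a PFN down to a pageblock boundary as the kernel macro does.

    #include <stdio.h>

    #define pageblock_order          9
    #define pageblock_nr_pages       (1UL << pageblock_order)
    /* Round a PFN down to the start of its pageblock. */
    #define pageblock_start_pfn(pfn) ((pfn) & ~(pageblock_nr_pages - 1))

    int main(void)
    {
            unsigned long zone_start_pfn = 0x40000;
            unsigned long zone_end_pfn = 0x7fe80;   /* zone ends mid-pageblock */

            /* Migrate scanner: zone start. Free scanner: last pageblock start. */
            printf("compact_cached_migrate_pfn = %#lx\n", zone_start_pfn);
            printf("compact_cached_free_pfn    = %#lx\n",
                   pageblock_start_pfn(zone_end_pfn - 1));      /* 0x7fe00 */
            return 0;
    }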
309 __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source, in __reset_isolation_pfn() argument
319 if (zone != page_zone(page)) in __reset_isolation_pfn()
339 /* Ensure the start of the pageblock or zone is online and valid */ in __reset_isolation_pfn()
341 block_pfn = max(block_pfn, zone->zone_start_pfn); in __reset_isolation_pfn()
348 /* Ensure the end of the pageblock or zone is online and valid */ in __reset_isolation_pfn()
350 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1); in __reset_isolation_pfn()
382 static void __reset_isolation_suitable(struct zone *zone) in __reset_isolation_suitable() argument
384 unsigned long migrate_pfn = zone->zone_start_pfn; in __reset_isolation_suitable()
385 unsigned long free_pfn = zone_end_pfn(zone) - 1; in __reset_isolation_suitable()
392 if (!zone->compact_blockskip_flush) in __reset_isolation_suitable()
395 zone->compact_blockskip_flush = false; in __reset_isolation_suitable()
398 * Walk the zone and update pageblock skip information. Source looks in __reset_isolation_suitable()
408 if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) && in __reset_isolation_suitable()
412 zone->compact_init_migrate_pfn = reset_migrate; in __reset_isolation_suitable()
413 zone->compact_cached_migrate_pfn[0] = reset_migrate; in __reset_isolation_suitable()
414 zone->compact_cached_migrate_pfn[1] = reset_migrate; in __reset_isolation_suitable()
418 if (__reset_isolation_pfn(zone, free_pfn, free_set, true) && in __reset_isolation_suitable()
422 zone->compact_init_free_pfn = reset_free; in __reset_isolation_suitable()
423 zone->compact_cached_free_pfn = reset_free; in __reset_isolation_suitable()
429 zone->compact_cached_migrate_pfn[0] = migrate_pfn; in __reset_isolation_suitable()
430 zone->compact_cached_migrate_pfn[1] = migrate_pfn; in __reset_isolation_suitable()
431 zone->compact_cached_free_pfn = free_pfn; in __reset_isolation_suitable()
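__reset_isolation_suitable() runs only when compact_blockskip_flush was set (line 392 above, i.e. the scanners previously met) and walks the zone from both ends at once, one pageblock per step, clearing skip hints; the first block that passes the checks from each end becomes the new initial position for the corresponding scanner. A condensed sketch of that control flow, with __reset_isolation_pfn()'s page checks stubbed to always succeed:

    #include <stdbool.h>

    #define pageblock_nr_pages       (1UL << 9)
    #define pageblock_start_pfn(pfn) ((pfn) & ~(pageblock_nr_pages - 1))

    struct scan_init {
            unsigned long init_migrate_pfn;
            unsigned long init_free_pfn;
    };

    /* Stub for __reset_isolation_pfn(): pretend every block is resettable. */
    static bool reset_isolation_pfn(unsigned long pfn)
    {
            (void)pfn;
            return true;
    }

    static void reset_isolation_suitable(unsigned long zone_start_pfn,
                                         unsigned long zone_end_pfn,
                                         struct scan_init *init)
    {
            unsigned long migrate_pfn = zone_start_pfn;
            unsigned long free_pfn = zone_end_pfn - 1;
            bool source_set = false, free_set = false;

            /* One pageblock per step from each end until the walks meet. */
            while (migrate_pfn < free_pfn) {
                    if (!source_set && reset_isolation_pfn(migrate_pfn)) {
                            source_set = true;
                            init->init_migrate_pfn = pageblock_start_pfn(migrate_pfn);
                    }
                    if (!free_set && reset_isolation_pfn(free_pfn)) {
                            free_set = true;
                            init->init_free_pfn = pageblock_start_pfn(free_pfn);
                    }
                    migrate_pfn += pageblock_nr_pages;
                    free_pfn -= pageblock_nr_pages;
            }
    }

    int main(void)
    {
            struct scan_init init = { 0 };

            reset_isolation_suitable(0x40000, 0x80000, &init);
            return 0;
    }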
440 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable() local
441 if (!populated_zone(zone)) in reset_isolation_suitable()
444 __reset_isolation_suitable(zone); in reset_isolation_suitable()
469 struct zone *zone = cc->zone; in update_cached_migrate() local
478 if (pfn > zone->compact_cached_migrate_pfn[0]) in update_cached_migrate()
479 zone->compact_cached_migrate_pfn[0] = pfn; in update_cached_migrate()
481 pfn > zone->compact_cached_migrate_pfn[1]) in update_cached_migrate()
482 zone->compact_cached_migrate_pfn[1] = pfn; in update_cached_migrate()
492 struct zone *zone = cc->zone; in update_pageblock_skip() local
499 if (pfn < zone->compact_cached_free_pfn) in update_pageblock_skip()
500 zone->compact_cached_free_pfn = pfn; in update_pageblock_skip()
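The two update helpers above keep the cached restart positions monotonic within a run: the migrate-scanner cache only moves toward the zone end, the free-scanner cache only toward the zone start, so a restart never rescans ground already covered. Illustrative helpers capturing just that invariant (the names are hypothetical; the kernel updates the struct zone fields directly, as the matches show):

    /* Migrate cache only advances (toward the zone end). */
    static inline void cache_migrate_pfn(unsigned long *cached, unsigned long pfn)
    {
            if (pfn > *cached)
                    *cached = pfn;
    }

    /* Free cache only retreats (toward the zone start). */
    static inline void cache_free_pfn(unsigned long *cached, unsigned long pfn)
    {
            if (pfn < *cached)
                    *cached = pfn;
    }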
619 && compact_unlock_should_abort(&cc->zone->lock, flags, in isolate_freepages_block()
649 locked = compact_lock_irqsave(&cc->zone->lock, in isolate_freepages_block()
685 spin_unlock_irqrestore(&cc->zone->lock, flags); in isolate_freepages_block()
719 * Non-free pages, invalid PFNs, or zone boundaries within the
739 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_freepages_range()
740 block_start_pfn = cc->zone->zone_start_pfn; in isolate_freepages_range()
762 block_end_pfn, cc->zone)) in isolate_freepages_range()
796 pg_data_t *pgdat = cc->zone->zone_pgdat; in too_many_isolated()
873 pg_data_t *pgdat = cc->zone->zone_pgdat; in isolate_migratepages_block()
974 low_pfn == cc->zone->zone_start_pfn)) { in isolate_migratepages_block()
1038 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
1365 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages_range()
1366 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages_range()
1376 block_end_pfn, cc->zone)) in isolate_migratepages_range()
1423 * We are checking page_order without zone->lock taken. But in suitable_migration_target()
1508 start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn); in fast_isolate_around()
1509 end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)); in fast_isolate_around()
1511 page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone); in fast_isolate_around()
1559 if (cc->free_pfn >= cc->zone->compact_init_free_pfn) { in fast_isolate_freepages()
1584 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1594 spin_lock_irqsave(&cc->zone->lock, flags); in fast_isolate_freepages()
1605 cc->zone->zone_start_pfn); in fast_isolate_freepages()
1653 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_isolate_freepages()
1685 zone_end_pfn(cc->zone)), in fast_isolate_freepages()
1686 cc->zone); in fast_isolate_freepages()
1696 if (highest && highest >= cc->zone->compact_cached_free_pfn) { in fast_isolate_freepages()
1698 cc->zone->compact_cached_free_pfn = highest; in fast_isolate_freepages()
1715 struct zone *zone = cc->zone; in isolate_freepages() local
1730 * successfully isolated from, zone-cached value, or the end of the in isolate_freepages()
1731 * zone when isolating for the first time. For looping we also need in isolate_freepages()
1735 * zone which ends in the middle of a pageblock. in isolate_freepages()
1742 zone_end_pfn(zone)); in isolate_freepages()
1758 * This can iterate a massively long zone without finding any in isolate_freepages()
1765 zone); in isolate_freepages()
1888 * freelist. All pages on the freelist are from the same zone, so there is no
1981 * If the migrate_pfn is not at the start of a zone or the start in fast_find_migrateblock()
1985 if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn)) in fast_find_migrateblock()
2012 if (cc->migrate_pfn != cc->zone->zone_start_pfn) in fast_find_migrateblock()
2019 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
2027 spin_lock_irqsave(&cc->zone->lock, flags); in fast_find_migrateblock()
2053 if (pfn < cc->zone->zone_start_pfn) in fast_find_migrateblock()
2054 pfn = cc->zone->zone_start_pfn; in fast_find_migrateblock()
2060 spin_unlock_irqrestore(&cc->zone->lock, flags); in fast_find_migrateblock()
2093 * Start at where we last stopped, or beginning of the zone as in isolate_migratepages()
2099 if (block_start_pfn < cc->zone->zone_start_pfn) in isolate_migratepages()
2100 block_start_pfn = cc->zone->zone_start_pfn; in isolate_migratepages()
2123 * This can potentially iterate a massively long zone with in isolate_migratepages()
2131 block_end_pfn, cc->zone); in isolate_migratepages()
2149 low_pfn == cc->zone->zone_start_pfn) && in isolate_migratepages()
2200 * A zone's fragmentation score is the external fragmentation with respect to the
2203 static unsigned int fragmentation_score_zone(struct zone *zone) in fragmentation_score_zone() argument
2205 return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER); in fragmentation_score_zone()
2209 * A weighted zone's fragmentation score is the external fragmentation
2210 * with respect to the COMPACTION_HPAGE_ORDER, scaled by the zone's size. It
2218 static unsigned int fragmentation_score_zone_weighted(struct zone *zone) in fragmentation_score_zone_weighted() argument
2222 score = zone->present_pages * fragmentation_score_zone(zone); in fragmentation_score_zone_weighted()
2223 return div64_ul(score, zone->zone_pgdat->node_present_pages + 1); in fragmentation_score_zone_weighted()
2239 struct zone *zone; in fragmentation_score_node() local
2241 zone = &pgdat->node_zones[zoneid]; in fragmentation_score_node()
2242 if (!populated_zone(zone)) in fragmentation_score_node()
2244 score += fragmentation_score_zone_weighted(zone); in fragmentation_score_node()
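fragmentation_score_node() is therefore a present-pages-weighted average of per-zone scores, so a large lightly fragmented zone dominates a small badly fragmented one; the +1 at line 2223 guards against dividing by zero on an empty node. A sketch of the weighting arithmetic, with extfrag_for_order() replaced by a fixed per-zone score (the kernel derives the real score from the zone's free lists):

    #include <stdio.h>

    struct zone_frag {
            unsigned long present_pages;
            unsigned int score;     /* stand-in for extfrag_for_order() */
    };

    /* Node score: per-zone scores weighted by zone size, as in the kernel. */
    static unsigned int fragmentation_score_node(const struct zone_frag *zones,
                                                 int nr, unsigned long node_present)
    {
            unsigned long long score = 0;

            for (int i = 0; i < nr; i++)
                    score += (unsigned long long)zones[i].present_pages *
                             zones[i].score;

            /* The +1 mirrors the kernel's guard against an empty node. */
            return score / (node_present + 1);
    }

    int main(void)
    {
            /* 1 GiB zone at score 80 vs 7 GiB zone at score 10 (4 KiB pages). */
            struct zone_frag zones[] = {
                    { .present_pages = 1UL << 18, .score = 80 },
                    { .present_pages = 7UL << 18, .score = 10 },
            };

            printf("node score = %u\n",     /* (80 + 7 * 10) / 8 ~= 18 */
                   fragmentation_score_node(zones, 2, 8UL << 18));
            return 0;
    }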
2279 reset_cached_positions(cc->zone); in __compact_finished()
2288 cc->zone->compact_blockskip_flush = true; in __compact_finished()
2300 pgdat = cc->zone->zone_pgdat; in __compact_finished()
2304 score = fragmentation_score_zone(cc->zone); in __compact_finished()
2333 if (__zone_watermark_ok(cc->zone, cc->order, in __compact_finished()
2334 high_wmark_pages(cc->zone), in __compact_finished()
2336 zone_page_state(cc->zone, in __compact_finished()
2346 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2386 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2393 static bool __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2415 watermark += low_wmark_pages(zone) - min_wmark_pages(zone); in __compaction_suitable()
2416 return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, in __compaction_suitable()
2421 * compaction_suitable: Is this suitable to run compaction on this zone now?
2423 bool compaction_suitable(struct zone *zone, int order, unsigned long watermark, in compaction_suitable() argument
2429 suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx, in compaction_suitable()
2430 zone_page_state(zone, NR_FREE_PAGES)); in compaction_suitable()
2450 int fragindex = fragmentation_index(zone, order); in compaction_suitable()
2462 trace_mm_compaction_suitable(zone, order, compact_result); in compaction_suitable()
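compaction_suitable() boosts the supplied watermark by (low - min), as line 2415 shows, and then consults fragmentation_index(): values near 0 mean the failure is a lack of free memory (reclaim helps), values near 1000 mean the pages exist but are too scattered (compaction helps); the kernel compares the result against sysctl_extfrag_threshold, 500 by default. A sketch of the vmstat-style index formula, assuming no free block of the requested order exists (if one does, the kernel returns -1000, meaning there is no fragmentation problem at all):

    #include <stdio.h>

    /* Fragmentation index in the style of mm/vmstat.c for a failing request. */
    static int fragmentation_index(unsigned int order, unsigned long free_pages,
                                   unsigned long free_blocks_total)
    {
            unsigned long requested = 1UL << order;

            if (!free_blocks_total)
                    return 0;

            /* Near 0: too few pages overall. Near 1000: pages in tiny blocks. */
            return 1000 - (int)((1000 + free_pages * 1000UL / requested)
                                / free_blocks_total);
    }

    int main(void)
    {
            /* 4096 free pages shattered into 4096 order-0 blocks: index 998,
             * well above the default threshold of 500, so compaction runs. */
            printf("index = %d\n", fragmentation_index(9, 4096, 4096));
            return 0;
    }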
2471 struct zone *zone; in compaction_zonelist_suitable() local
2475 * Make sure at least one zone would pass __compaction_suitable if we continue in compaction_zonelist_suitable()
2478 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in compaction_zonelist_suitable()
2488 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2489 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in compaction_zonelist_suitable()
2490 if (__compaction_suitable(zone, order, min_wmark_pages(zone), in compaction_zonelist_suitable()
2506 compaction_suit_allocation_order(struct zone *zone, unsigned int order, in compaction_suit_allocation_order() argument
2514 free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS); in compaction_suit_allocation_order()
2516 free_pages = zone_page_state(zone, NR_FREE_PAGES); in compaction_suit_allocation_order()
2518 watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in compaction_suit_allocation_order()
2519 if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx, in compaction_suit_allocation_order()
2534 if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order), in compaction_suit_allocation_order()
2536 zone_page_state(zone, NR_FREE_PAGES))) in compaction_suit_allocation_order()
2540 if (!compaction_suitable(zone, order, watermark, highest_zoneidx)) in compaction_suit_allocation_order()
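compaction_suit_allocation_order() is a three-step gate: succeed immediately if the allocation watermark is already met; bail out to reclaim if even order-0 free pages fall short of the watermark plus compact_gap(order), the headroom the migrate scanner needs for target pages; otherwise let compaction_suitable() decide. A sketch of that ladder with the watermark and suitability tests stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    enum compact_result {
            COMPACT_SKIPPED,        /* not enough free memory: reclaim first */
            COMPACT_CONTINUE,       /* worth running the compactor */
            COMPACT_SUCCESS,        /* the allocation should already succeed */
    };

    /* compact_gap(): twice the allocation size, as in the kernel header. */
    static unsigned long compact_gap(unsigned int order)
    {
            return 2UL << order;
    }

    /*
     * Stub for __zone_watermark_ok(): the order-0 test is a page count,
     * while higher orders also need a contiguous block of that order,
     * which this imaginary fragmented zone does not have.
     */
    static bool watermark_ok(unsigned int order, unsigned long wmark)
    {
            unsigned long free_pages = 4096;

            if (order > 0)
                    return false;   /* plenty of pages, no big blocks */
            return free_pages >= wmark;
    }

    /* Stub for compaction_suitable()'s fragmentation-index verdict. */
    static bool suitable(unsigned int order)
    {
            (void)order;
            return true;
    }

    static enum compact_result suit_allocation_order(unsigned int order,
                                                     unsigned long watermark)
    {
            /* 1. The allocation would already succeed: nothing to do. */
            if (watermark_ok(order, watermark))
                    return COMPACT_SUCCESS;

            /* 2. No order-0 headroom for migration targets: reclaim first. */
            if (!watermark_ok(0, watermark + compact_gap(order)))
                    return COMPACT_SKIPPED;

            /* 3. Let the fragmentation index make the final call. */
            if (!suitable(order))
                    return COMPACT_SKIPPED;

            return COMPACT_CONTINUE;
    }

    int main(void)
    {
            /* Fragmented zone: enough order-0 pages, no order-9 block. */
            printf("result = %d (COMPACT_CONTINUE)\n",
                   suit_allocation_order(9, 1024));
            return 0;
    }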
2550 unsigned long start_pfn = cc->zone->zone_start_pfn; in compact_zone()
2551 unsigned long end_pfn = zone_end_pfn(cc->zone); in compact_zone()
2559 * These counters track activities during zone compaction. Initialize in compact_zone()
2560 * them before compacting a new zone. in compact_zone()
2573 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2586 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2587 __reset_isolation_suitable(cc->zone); in compact_zone()
2590 * Setup to move all movable pages to the end of the zone. Used cached in compact_zone()
2592 * want to compact the whole zone), but check that it is initialised in compact_zone()
2593 * by ensuring the values are within zone boundaries. in compact_zone()
2600 cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync]; in compact_zone()
2601 cc->free_pfn = cc->zone->compact_cached_free_pfn; in compact_zone()
2604 cc->zone->compact_cached_free_pfn = cc->free_pfn; in compact_zone()
2608 cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn; in compact_zone()
2609 cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn; in compact_zone()
2612 if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn) in compact_zone()
2627 cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1]; in compact_zone()
2661 cc->zone->compact_cached_migrate_pfn[1] = in compact_zone()
2662 cc->zone->compact_cached_migrate_pfn[0]; in compact_zone()
2673 last_migrated_pfn = max(cc->zone->zone_start_pfn, in compact_zone()
2748 lru_add_drain_cpu_zone(cc->zone); in compact_zone()
2769 * already reset to zone end in compact_finished() in compact_zone()
2771 if (free_pfn > cc->zone->compact_cached_free_pfn) in compact_zone()
2772 cc->zone->compact_cached_free_pfn = free_pfn; in compact_zone()
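compact_zone() seeds both scanners from the cached positions, clamped to the zone boundaries as the fragments above show, and then drives them toward each other; the run is complete when the migrate scanner meets the free scanner. A skeleton of that main loop, with isolation and migration reduced to a stub that advances both scanners one pageblock per step:

    #include <stdbool.h>
    #include <stdio.h>

    #define pageblock_nr_pages (1UL << 9)

    /* Stub for one isolate-and-migrate cycle: both scanners advance a block. */
    static void migrate_step(unsigned long *migrate_pfn, unsigned long *free_pfn)
    {
            *migrate_pfn += pageblock_nr_pages;
            *free_pfn -= pageblock_nr_pages;
    }

    /* The run is complete once the scanners meet or cross. */
    static bool compact_finished(unsigned long migrate_pfn, unsigned long free_pfn)
    {
            return migrate_pfn >= free_pfn;
    }

    int main(void)
    {
            unsigned long zone_start_pfn = 0x40000, zone_end_pfn = 0x42000;

            /* Seed from "cached" positions, clamped to the zone boundaries. */
            unsigned long migrate_pfn = zone_start_pfn;
            unsigned long free_pfn = zone_end_pfn - pageblock_nr_pages;

            while (!compact_finished(migrate_pfn, free_pfn))
                    migrate_step(&migrate_pfn, &free_pfn);

            printf("scanners met near %#lx\n", migrate_pfn);
            return 0;
    }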
2785 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2795 .zone = zone, in compact_zone_order()
2855 struct zone *zone; in try_to_compact_pages() local
2863 /* Compact each zone in the list */ in try_to_compact_pages()
2864 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in try_to_compact_pages()
2870 !__cpuset_zone_allowed(zone, gfp_mask)) in try_to_compact_pages()
2874 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2879 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2886 * We think the allocation will succeed in this zone, in try_to_compact_pages()
2889 * succeeds in this zone. in try_to_compact_pages()
2891 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2899 * We think that allocation won't succeed in this zone in try_to_compact_pages()
2903 defer_compaction(zone, order); in try_to_compact_pages()
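The loop above ties the deferral machinery to per-zone outcomes: a zone inside its backoff window is skipped, COMPACT_SUCCESS resets the backoff via compaction_defer_reset(), and a failed synchronous pass lengthens it via defer_compaction(). A condensed sketch of that outcome handling; the defer helpers are no-op stubs here, and compaction_defer_reset() drops the kernel's third alloc_success argument:

    #include <stdbool.h>

    struct zone { int nid; };

    enum compact_result { COMPACT_SKIPPED, COMPACT_COMPLETE, COMPACT_SUCCESS };

    /* No-op stubs for the deferral helpers sketched earlier. */
    static bool compaction_deferred(struct zone *z, int order)
    {
            (void)z; (void)order;
            return false;
    }
    static void compaction_defer_reset(struct zone *z, int order) { (void)z; (void)order; }
    static void defer_compaction(struct zone *z, int order) { (void)z; (void)order; }

    static enum compact_result compact_zone_order(struct zone *z, int order)
    {
            (void)z; (void)order;
            return COMPACT_COMPLETE;        /* stub: a full pass that failed */
    }

    static void try_zones(struct zone *zones, int nr, int order, bool sync)
    {
            for (int i = 0; i < nr; i++) {
                    struct zone *zone = &zones[i];

                    /* Still inside this zone's backoff window: skip it. */
                    if (compaction_deferred(zone, order))
                            continue;

                    switch (compact_zone_order(zone, order)) {
                    case COMPACT_SUCCESS:
                            /* Allocation should now succeed: restart backoff. */
                            compaction_defer_reset(zone, order);
                            return;
                    case COMPACT_COMPLETE:
                            /* A whole synchronous pass failed: back off more. */
                            if (sync)
                                    defer_compaction(zone, order);
                            break;
                    default:
                            break;
                    }
            }
    }

    int main(void)
    {
            struct zone zones[2] = { { 0 }, { 1 } };

            try_zones(zones, 2, 9, true);
            return 0;
    }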
2923 * For proactive compaction, compact till each zone's fragmentation score
2927 * contention on per-node or per-zone locks.
2932 struct zone *zone; in compact_node() local
2943 zone = &pgdat->node_zones[zoneid]; in compact_node()
2944 if (!populated_zone(zone)) in compact_node()
2950 cc.zone = zone; in compact_node()
3068 struct zone *zone; in kcompactd_node_suitable() local
3075 zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
3077 if (!populated_zone(zone)) in kcompactd_node_suitable()
3080 ret = compaction_suit_allocation_order(zone, in kcompactd_node_suitable()
3098 struct zone *zone; in kcompactd_do_work() local
3117 zone = &pgdat->node_zones[zoneid]; in kcompactd_do_work()
3118 if (!populated_zone(zone)) in kcompactd_do_work()
3121 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
3124 ret = compaction_suit_allocation_order(zone, in kcompactd_do_work()
3133 cc.zone = zone; in kcompactd_do_work()
3137 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
3141 * otherwise coalesce on the zone's free area for in kcompactd_do_work()
3145 drain_all_pages(zone); in kcompactd_do_work()
3151 defer_compaction(zone, cc.order); in kcompactd_do_work()
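One detail in the failure path above: before deferring, kcompactd_do_work() drains the per-CPU page lists, because pages parked on pcplists cannot coalesce in the zone's free area and could make the next pass fail for the same spurious reason. A condensed sketch of that path, with the kernel calls replaced by stubs:

    struct zone { int nid; };

    enum compact_result { COMPACT_CONTINUE, COMPACT_COMPLETE, COMPACT_SUCCESS };

    static void compaction_defer_reset(struct zone *z, int order) { (void)z; (void)order; }
    static void drain_all_pages(struct zone *z) { (void)z; }
    static void defer_compaction(struct zone *z, int order) { (void)z; (void)order; }

    static void handle_kcompactd_result(struct zone *zone, int order,
                                        enum compact_result ret)
    {
            if (ret == COMPACT_SUCCESS) {
                    compaction_defer_reset(zone, order);
            } else if (ret == COMPACT_COMPLETE) {
                    /* Flush pcplist pages back so they can coalesce in the
                     * zone's free area, then back off before retrying. */
                    drain_all_pages(zone);
                    defer_compaction(zone, order);
            }
    }

    int main(void)
    {
            struct zone z = { 0 };

            handle_kcompactd_result(&z, 9, COMPACT_COMPLETE);
            return 0;
    }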
3233 * as the condition of the zone changing substantially in kcompactd()