Lines matching refs: order

51 static inline bool is_via_compact_memory(int order)  in is_via_compact_memory()  argument
53 return order == -1; in is_via_compact_memory()
59 static inline bool is_via_compact_memory(int order) { return false; } in is_via_compact_memory() argument
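The function names in these references match the kernel's memory compaction code (mm/compaction.c). This first group shows the sentinel convention for order: -1 means compaction of a whole zone was requested (for example via /proc/sys/vm/compact_memory) rather than compaction on behalf of an allocation of a specific order. A minimal sketch assembled from the two definitions above; the exact #ifdef guard around them is assumed, since it is not visible in the listing:

    #ifdef CONFIG_COMPACTION    /* guard assumed, not shown in the listing */
    /* order == -1: "compact the whole zone", not a specific allocation */
    static inline bool is_via_compact_memory(int order)
    {
        return order == -1;
    }
    #else
    static inline bool is_via_compact_memory(int order) { return false; }
    #endif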
67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order)) argument
68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
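block_start_pfn() and block_end_pfn() bracket the 1 << order aligned block that contains a pfn: round down to the block start, and up past the pfn to the next block boundary. As a quick worked example with illustrative numbers: for order = 9 (so 1UL << order = 512 = 0x200), pfn = 0x12345 gives block_start_pfn(0x12345, 9) == 0x12200 and block_end_pfn(0x12345, 9) == 0x12400, i.e. the half-open range [0x12200, 0x12400) covering that pfn.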
83 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) in mark_allocated_noprof() argument
85 post_alloc_hook(page, order, __GFP_MOVABLE); in mark_allocated_noprof()
93 int order; in release_free_list() local
96 for (order = 0; order < NR_PAGE_ORDERS; order++) { in release_free_list()
99 list_for_each_entry_safe(page, next, &freepages[order], lru) { in release_free_list()
107 mark_allocated(page, order, __GFP_MOVABLE); in release_free_list()
108 __free_pages(page, order); in release_free_list()
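release_free_list() drains the per-order lists of free pages that compaction isolated but never consumed: each page is re-marked as if it had just been allocated (mark_allocated() wraps post_alloc_hook(), per lines 83-85 above) and then handed back to the buddy allocator at the same order. A simplified sketch of the loop; the list_del() and the page/next locals are assumed since they do not appear in these references:

    int order;
    struct page *page, *next;

    for (order = 0; order < NR_PAGE_ORDERS; order++) {
        list_for_each_entry_safe(page, next, &freepages[order], lru) {
            list_del(&page->lru);        /* assumed */
            /* make the isolated free page look like a normal allocation
             * so it can be returned via __free_pages() */
            mark_allocated(page, order, __GFP_MOVABLE);
            __free_pages(page, order);
        }
    }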
126 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
131 if (order < zone->compact_order_failed) in defer_compaction()
132 zone->compact_order_failed = order; in defer_compaction()
137 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
141 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
145 if (order < zone->compact_order_failed) in compaction_deferred()
154 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
164 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
171 if (order >= zone->compact_order_failed) in compaction_defer_reset()
172 zone->compact_order_failed = order + 1; in compaction_defer_reset()
174 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
178 static bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
180 if (order < zone->compact_order_failed) in compaction_restarting()
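All four deferral helpers key off zone->compact_order_failed, the lowest order for which compaction recently failed: defer_compaction() lowers it to the failing order, compaction_deferred() and compaction_restarting() never apply to requests below it, and compaction_defer_reset() raises it just past an order that succeeded. A condensed sketch of that bookkeeping; the compact_defer_shift/compact_considered counters and the tracepoints are left out:

    static void defer_compaction(struct zone *zone, int order)
    {
        if (order < zone->compact_order_failed)
            zone->compact_order_failed = order;
        /* ... also widen the deferral window (compact_defer_shift) ... */
    }

    static bool compaction_deferred(struct zone *zone, int order)
    {
        if (order < zone->compact_order_failed)
            return false;    /* smaller requests are never deferred */
        /* simplified: the real check also counts how many attempts
         * have already been skipped before giving up on deferral */
        return true;
    }

    void compaction_defer_reset(struct zone *zone, int order, bool alloc_success)
    {
        if (order >= zone->compact_order_failed)
            zone->compact_order_failed = order + 1;
    }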
568 unsigned int order; in isolate_freepages_block() local
599 const unsigned int order = compound_order(page); in isolate_freepages_block() local
601 if ((order <= MAX_PAGE_ORDER) && in isolate_freepages_block()
602 (blockpfn + (1UL << order) <= end_pfn)) { in isolate_freepages_block()
603 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
604 page += (1UL << order) - 1; in isolate_freepages_block()
605 nr_scanned += (1UL << order) - 1; in isolate_freepages_block()
625 order = buddy_order(page); in isolate_freepages_block()
626 isolated = __isolate_free_page(page, order); in isolate_freepages_block()
629 set_page_private(page, order); in isolate_freepages_block()
634 list_add_tail(&page->lru, &freelist[order]); in isolate_freepages_block()
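Inside isolate_freepages_block() the free-page scanner is order-aware in two places: when it meets a compound or free page it can step blockpfn forward by (1UL << order) - 1 instead of visiting every base page, and when it isolates a free page it stashes the order in page_private() and queues the page on the matching freelist[order]. A trimmed sketch of those two spots; error handling and counters are omitted:

    /* fast-forward over a whole higher-order page (scan loop, simplified) */
    unsigned int order = compound_order(page);

    if ((order <= MAX_PAGE_ORDER) &&
        (blockpfn + (1UL << order) <= end_pfn)) {
        blockpfn += (1UL << order) - 1;
        page += (1UL << order) - 1;
        nr_scanned += (1UL << order) - 1;
    }

    /* later in the same loop: isolate a free page at its buddy order */
    order = buddy_order(page);
    isolated = __isolate_free_page(page, order);
    if (isolated) {
        set_page_private(page, order);    /* remember the order */
        list_add_tail(&page->lru, &freelist[order]);
    }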
699 int order; in isolate_freepages_range() local
701 for (order = 0; order < NR_PAGE_ORDERS; order++) in isolate_freepages_range()
702 INIT_LIST_HEAD(&cc->freepages[order]); in isolate_freepages_range()
801 static bool skip_isolation_on_order(int order, int target_order) in skip_isolation_on_order() argument
810 if (!is_via_compact_memory(target_order) && order >= target_order) in skip_isolation_on_order()
816 return order >= pageblock_order; in skip_isolation_on_order()
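skip_isolation_on_order() decides whether a large folio found by the migration scanner is worth isolating at all: for order-targeted compaction a folio already at or above the target order cannot help, and for whole-zone compaction only folios smaller than a pageblock are migrated. The two references above are essentially the whole function:

    static bool skip_isolation_on_order(int order, int target_order)
    {
        /* a folio at or above the requested order cannot help the
         * allocation that triggered compaction */
        if (!is_via_compact_memory(target_order) && order >= target_order)
            return true;

        /* for full-zone compaction, only move sub-pageblock folios */
        return order >= pageblock_order;
    }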
880 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
906 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
951 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
959 if (order <= MAX_PAGE_ORDER) { in isolate_migratepages_block()
960 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
961 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
982 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
983 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1034 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
1037 if (skip_isolation_on_order(order, cc->order)) { in isolate_migratepages_block()
1038 if (order <= MAX_PAGE_ORDER) { in isolate_migratepages_block()
1039 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1040 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1186 cc->order) && in isolate_migratepages_block()
1259 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
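Within isolate_migratepages_block() the same ideas recur on the migration side: a large folio that skip_isolation_on_order() rejects advances low_pfn by (1UL << order) - 1 in one step, and cc->order also sets the stride of next_skip_pfn (lines 880, 906, 1259), so after a failed isolation the scanner can jump to the next cc->order-sized sub-block. A sketch of the folio skip; the surrounding PageCompound() check and the isolate_fail label are assumed from context:

    if (PageCompound(page)) {    /* surrounding check assumed */
        const unsigned int order = compound_order(page);

        /* a folio this large is stepped over as a unit, not pfn by pfn */
        if (skip_isolation_on_order(order, cc->order)) {
            if (order <= MAX_PAGE_ORDER) {
                low_pfn += (1UL << order) - 1;
                nr_scanned += (1UL << order) - 1;
            }
            goto isolate_fail;    /* label assumed */
        }
    }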
1384 int order = cc->order > 0 ? cc->order : pageblock_order; in suitable_migration_target() local
1391 if (buddy_order_unsafe(page) >= order) in suitable_migration_target()
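suitable_migration_target() applies a similar cut-off on the free-scanner side: if a candidate target block already starts with a free buddy at least as large as the relevant order (the requested order, or pageblock_order for whole-zone compaction), migrating pages into it is pointless and the block is rejected. A sketch; the PageBuddy() check around it is assumed since it does not appear in the listing:

    if (PageBuddy(page)) {    /* assumed */
        int order = cc->order > 0 ? cc->order : pageblock_order;

        /* the block already holds a big enough free chunk: skip it */
        if (buddy_order_unsafe(page) >= order)
            return false;
    }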
1487 static int next_search_order(struct compact_control *cc, int order) in next_search_order() argument
1489 order--; in next_search_order()
1490 if (order < 0) in next_search_order()
1491 order = cc->order - 1; in next_search_order()
1494 if (order == cc->search_order) { in next_search_order()
1497 cc->search_order = cc->order - 1; in next_search_order()
1501 return order; in next_search_order()
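next_search_order() drives the fast free-page search as a circular walk over the orders below cc->order: decrement, wrap from 0 back to cc->order - 1, and signal completion with -1 once the walk returns to where it started (cc->search_order). A sketch; how the starting point is advanced for the next call is abbreviated:

    static int next_search_order(struct compact_control *cc, int order)
    {
        order--;
        if (order < 0)
            order = cc->order - 1;

        /* wrapped around to the starting order: stop this search */
        if (order == cc->search_order) {
            /* ... advance cc->search_order for the next attempt ... */
            return -1;
        }

        return order;
    }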
1513 int order; in fast_isolate_freepages() local
1516 if (cc->order <= 0) in fast_isolate_freepages()
1543 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1545 for (order = cc->search_order; in fast_isolate_freepages()
1546 !page && order >= 0; in fast_isolate_freepages()
1547 order = next_search_order(cc, order)) { in fast_isolate_freepages()
1548 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1573 cc->search_order = order; in fast_isolate_freepages()
1602 if (__isolate_free_page(page, order)) { in fast_isolate_freepages()
1603 set_page_private(page, order); in fast_isolate_freepages()
1604 nr_isolated = 1 << order; in fast_isolate_freepages()
1608 list_add_tail(&page->lru, &cc->freepages[order]); in fast_isolate_freepages()
1612 order = cc->search_order + 1; in fast_isolate_freepages()
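fast_isolate_freepages() only runs for order-targeted compaction (it bails out when cc->order <= 0, line 1516) and then walks the free lists looking for a single free page near the migration scanner, caching the order it stopped at in cc->search_order (line 1573). A heavily condensed sketch of the order handling; how a candidate page is chosen inside each free_area is elided:

    cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

    for (order = cc->search_order;
         !page && order >= 0;
         order = next_search_order(cc, order)) {
        struct free_area *area = &cc->zone->free_area[order];

        /* ... scan area->free_list for a usable free page ... */

        if (page && __isolate_free_page(page, order)) {
            set_page_private(page, order);
            nr_isolated = 1 << order;
            list_add_tail(&page->lru, &cc->freepages[order]);
        }
    }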
1801 int order = folio_order(src); in compaction_alloc_noprof() local
1808 for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++) in compaction_alloc_noprof()
1827 while (start_order > order) { in compaction_alloc_noprof()
1836 post_alloc_hook(&dst->page, order, __GFP_MOVABLE); in compaction_alloc_noprof()
1838 if (order) in compaction_alloc_noprof()
1839 prep_compound_page(&dst->page, order); in compaction_alloc_noprof()
1840 cc->nr_freepages -= 1 << order; in compaction_alloc_noprof()
1841 cc->nr_migratepages -= 1 << order; in compaction_alloc_noprof()
1858 int order = folio_order(dst); in compaction_free() local
1862 free_pages_prepare(page, order); in compaction_free()
1863 list_add(&dst->lru, &cc->freepages[order]); in compaction_free()
1864 cc->nr_freepages += 1 << order; in compaction_free()
1866 cc->nr_migratepages += 1 << order; in compaction_free()
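compaction_alloc() (the _noprof variant above) hands the migration code a destination of exactly folio_order(src): it takes the smallest available page of at least that order from cc->freepages[], halves it down to the exact order while returning the unused upper halves to the lists, then prepares it with post_alloc_hook()/prep_compound_page() and charges 1 << order against the nr_freepages/nr_migratepages budgets; compaction_free() reverses this for an unused destination. A condensed sketch; the page selection, the dst folio conversion, and the halving details are reconstructed from context rather than from the listing:

    int order = folio_order(src);
    int start_order;
    struct page *freepage;    /* first entry of cc->freepages[start_order], removal elided */
    unsigned long size;

    /* smallest non-empty free list that is at least as large as needed */
    for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++)
        if (!list_empty(&cc->freepages[start_order]))
            break;

    size = 1UL << start_order;

    /* split down: keep halving, returning the upper half at the lower order */
    while (start_order > order) {
        start_order--;
        size >>= 1;
        list_add(&freepage[size].lru, &cc->freepages[start_order]);
        set_page_private(&freepage[size], start_order);
    }

    post_alloc_hook(&dst->page, order, __GFP_MOVABLE);    /* dst wraps freepage */
    if (order)
        prep_compound_page(&dst->page, order);
    cc->nr_freepages -= 1 << order;
    cc->nr_migratepages -= 1 << order;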
1930 int order; in fast_find_migrateblock() local
1957 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1980 for (order = cc->order - 1; in fast_find_migrateblock()
1981 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; in fast_find_migrateblock()
1982 order--) { in fast_find_migrateblock()
1983 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
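fast_find_migrateblock() is the mirror-image shortcut for the migration scanner, and it only pays off for costly requests: for cc->order <= PAGE_ALLOC_COSTLY_ORDER it falls back to the linear scan (line 1957), otherwise it searches the free lists from just below the target order down to PAGE_ALLOC_COSTLY_ORDER for a movable free page whose pageblock can become the next migration source. A sketch of that loop; the per-page checks are elided:

    if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
        return pfn;    /* not worth it: use the linear scan */

    for (order = cc->order - 1;
         order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
         order--) {
        struct free_area *area = &cc->zone->free_area[order];

        /* ... pick a MOVABLE free page and reuse its pageblock as the
         *     starting point for the migration scanner ... */
    }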
2236 unsigned int order; in __compact_finished() local
2279 if (is_via_compact_memory(cc->order)) in __compact_finished()
2297 if (__zone_watermark_ok(cc->zone, cc->order, in __compact_finished()
2309 for (order = cc->order; order < NR_PAGE_ORDERS; order++) { in __compact_finished()
2310 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2326 if (find_suitable_fallback(area, order, migratetype, true) >= 0) in __compact_finished()
2350 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
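__compact_finished() is where order decides when to stop: whole-zone compaction (order == -1) only ends when the scanners meet, otherwise compaction is declared successful (COMPACT_SUCCESS) as soon as some free area at or above cc->order can serve the allocation's migratetype directly or through a suitable fallback. A sketch of that loop; the free_area_empty() test is assumed from context:

    for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
        struct free_area *area = &cc->zone->free_area[order];

        /* a free page of the right migratetype already exists */
        if (!free_area_empty(area, migratetype))    /* assumed */
            return COMPACT_SUCCESS;

        /* or the allocation could steal from another migratetype */
        if (find_suitable_fallback(area, order, migratetype, true) >= 0)
            return COMPACT_SUCCESS;
    }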
2357 static bool __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2377 watermark += compact_gap(order); in __compaction_suitable()
2378 if (order > PAGE_ALLOC_COSTLY_ORDER) in __compaction_suitable()
2387 bool compaction_suitable(struct zone *zone, int order, unsigned long watermark, in compaction_suitable() argument
2393 suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx, in compaction_suitable()
2413 if (order > PAGE_ALLOC_COSTLY_ORDER) { in compaction_suitable()
2414 int fragindex = fragmentation_index(zone, order); in compaction_suitable()
2426 trace_mm_compaction_suitable(zone, order, compact_result); in compaction_suitable()
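__compaction_suitable() asks a different question: are there enough order-0 free pages for compaction to have raw material to work with? It checks an order-0 watermark inflated by compact_gap(order), with a further adjustment for costly orders (line 2378). compact_gap() is defined elsewhere as roughly twice the request size (2UL << order in current kernels), so an order-9 request (a 2MB THP with 4KB pages) adds 1024 extra free base pages, about 4MB, on top of the watermark. For costly orders, compaction_suitable() additionally consults fragmentation_index() so that compaction is not attempted when the shortage is capacity rather than fragmentation.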
2432 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, in compaction_zonelist_suitable() argument
2452 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2454 if (__compaction_suitable(zone, order, min_wmark_pages(zone), in compaction_zonelist_suitable()
2470 compaction_suit_allocation_order(struct zone *zone, unsigned int order, in compaction_suit_allocation_order() argument
2483 if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx, in compaction_suit_allocation_order()
2496 if (order > PAGE_ALLOC_COSTLY_ORDER && async && in compaction_suit_allocation_order()
2498 if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order), in compaction_suit_allocation_order()
2504 if (!compaction_suitable(zone, order, watermark, highest_zoneidx)) in compaction_suit_allocation_order()
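compaction_suit_allocation_order() bundles those checks for callers: skip compaction entirely if the allocation would already succeed at the requested order (line 2483), bail out of costly async attempts when even order-0 pages plus the compaction gap are short (lines 2496-2498), and otherwise let compaction_suitable() decide. A sketch; some arguments and the CMA handling on the async path are assumed or omitted:

    if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx,
                            alloc_flags, free_pages))    /* args partly assumed */
        return COMPACT_SUCCESS;

    if (order > PAGE_ALLOC_COSTLY_ORDER && async) {
        if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order),
                                 highest_zoneidx, 0, free_pages))
            return COMPACT_SKIPPED;
    }

    if (!compaction_suitable(zone, order, watermark, highest_zoneidx))
        return COMPACT_SKIPPED;

    return COMPACT_CONTINUE;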
2520 int order; in compact_zone() local
2530 for (order = 0; order < NR_PAGE_ORDERS; order++) in compact_zone()
2531 INIT_LIST_HEAD(&cc->freepages[order]); in compact_zone()
2536 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2537 ret = compaction_suit_allocation_order(cc->zone, cc->order, in compact_zone()
2550 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2686 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2707 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2709 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
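compact_zone() ties the pieces together: it initialises one free list per order in cc->freepages[], runs the suitability check above unless this is whole-zone compaction, wipes the cached scanner positions when compaction_restarting() reports the deferral window is exhausted, and for order-targeted runs periodically drains per-CPU pages of the block just migrated, block_start_pfn(cc->migrate_pfn, cc->order), tying back to the macros at lines 67-68. A sketch of the order-dependent setup; the arguments past zone and order are reconstructed from context:

    for (order = 0; order < NR_PAGE_ORDERS; order++)
        INIT_LIST_HEAD(&cc->freepages[order]);

    if (!is_via_compact_memory(cc->order)) {
        ret = compaction_suit_allocation_order(cc->zone, cc->order,
                                               cc->highest_zoneidx,
                                               cc->alloc_flags,
                                               cc->mode == MIGRATE_ASYNC);
        if (ret != COMPACT_CONTINUE)
            return ret;
    }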
2749 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2756 .order = order, in compact_zone_order()
2757 .search_order = order, in compact_zone_order()
2814 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
2825 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
2838 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2843 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2855 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2867 defer_compaction(zone, order); in try_to_compact_pages()
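try_to_compact_pages() is the direct-compaction entry point and shows the deferral machinery in use: each zone is skipped when compaction_deferred() says so (except at the highest priority), a successful compact_zone_order() run calls compaction_defer_reset() so the zone is retried eagerly, and a sync run that completes without success calls defer_compaction(). A per-zone sketch; the zonelist iteration, result bookkeeping, and the exact defer condition are reconstructed from context:

    if (prio > MIN_COMPACT_PRIORITY && compaction_deferred(zone, order))
        continue;    /* this zone recently failed at this order */

    status = compact_zone_order(zone, order, gfp_mask, prio,
                                alloc_flags, highest_zoneidx, capture);

    if (status == COMPACT_SUCCESS) {
        compaction_defer_reset(zone, order, false);
        break;    /* the allocation should now succeed */
    }

    if (prio != COMPACT_PRIO_ASYNC &&
        (status == COMPACT_COMPLETE || status == COMPACT_PARTIAL_SKIPPED))
        defer_compaction(zone, order);    /* condition simplified */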
2898 .order = -1, in compact_node()
3064 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
3074 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
3085 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
3089 cc.order, zoneid, cc.alloc_flags, in kcompactd_do_work()
3101 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
3115 defer_compaction(zone, cc.order); in kcompactd_do_work()
3129 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3135 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) in wakeup_kcompactd() argument
3137 if (!order) in wakeup_kcompactd()
3140 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
3141 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
3156 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
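Finally, kcompactd's background work is order-driven: wakeup_kcompactd() records the largest order a failed allocation wanted in pgdat->kcompactd_max_order (ignoring order-0, which never needs compaction) and wakes the thread; kcompactd_do_work() then compacts each zone toward that order with the usual compaction_deferred()/compaction_defer_reset()/defer_compaction() bookkeeping and clears the hint once it has been covered (line 3129). A sketch of the wakeup side; the zone-index tracking and the actual wake-up are abbreviated:

    void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
    {
        if (!order)
            return;    /* order-0 pages never need compaction */

        if (pgdat->kcompactd_max_order < order)
            pgdat->kcompactd_max_order = order;

        /* ... also track highest_zoneidx and wake the kcompactd thread ... */
    }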