Lines Matching defs:order (mm/compaction.c)

46  * order == -1 is expected when compacting proactively via
51 static inline bool is_via_compact_memory(int order)
53 return order == -1;
59 static inline bool is_via_compact_memory(int order) { return false; }
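
Aside (not part of the listing): the order == -1 convention above marks a proactive or sysfs/procfs-triggered full-compaction request (e.g. a write to /proc/sys/vm/compact_memory) rather than a real allocation order. A minimal user-space sketch of that convention, for orientation only:

#include <stdbool.h>
#include <stdio.h>

/* Sketch only: mirrors the order == -1 convention shown in the listing. */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

int main(void)
{
	printf("order -1 -> proactive/full compaction: %d\n",
	       is_via_compact_memory(-1));
	printf("order  9 -> allocation-driven compaction: %d\n",
	       is_via_compact_memory(9));
	return 0;
}
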
67 #define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
68 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order))
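
Aside (not part of the listing): the two macros at lines 67-68 compute the start and exclusive end pfn of the order-sized block containing a pfn. A user-space sketch with stand-ins for the kernel's round_down()/ALIGN() helpers (the stand-ins assume a power-of-two block size, which is always the case here):

#include <stdio.h>

/* User-space stand-ins for the kernel's round_down()/ALIGN() helpers,
 * valid when the alignment is a power of two. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))

/* Same shape as the two macros in the listing. */
#define block_start_pfn(pfn, order) round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)   ALIGN((pfn) + 1, 1UL << (order))

int main(void)
{
	unsigned long pfn = 1234567;
	int pageblock_order = 9; /* assumed: typical value for 2MB pageblocks */

	printf("pfn %lu lives in block [%lu, %lu)\n", pfn,
	       block_start_pfn(pfn, pageblock_order),
	       block_end_pfn(pfn, pageblock_order));
	return 0;
}
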
71 * Page order with-respect-to which proactive compaction
83 static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags)
85 post_alloc_hook(page, order, __GFP_MOVABLE);
92 int order;
95 for (order = 0; order < NR_PAGE_ORDERS; order++) {
98 list_for_each_entry_safe(page, next, &freepages[order], lru) {
106 mark_allocated(page, order, __GFP_MOVABLE);
107 __free_pages(page, order);
158 static void defer_compaction(struct zone *zone, int order)
163 if (order < zone->compact_order_failed)
164 zone->compact_order_failed = order;
169 trace_mm_compaction_defer_compaction(zone, order);
173 static bool compaction_deferred(struct zone *zone, int order)
177 if (order < zone->compact_order_failed)
186 trace_mm_compaction_deferred(zone, order);
192 * Update defer tracking counters after successful compaction of given order,
196 void compaction_defer_reset(struct zone *zone, int order,
203 if (order >= zone->compact_order_failed)
204 zone->compact_order_failed = order + 1;
206 trace_mm_compaction_defer_reset(zone, order);
210 static bool compaction_restarting(struct zone *zone, int order)
212 if (order < zone->compact_order_failed)
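
Aside (not part of the listing): lines 158-212 are the per-zone defer bookkeeping. A failed compaction records the lowest failing order in compact_order_failed and backs off exponentially; a success resets the counters and raises the threshold past the requested order. A self-contained sketch of that state machine, with the zone reduced to the three fields involved and tracing dropped (a stand-in that follows the listed logic, not the kernel code itself):

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

/* Stand-in for the defer-tracking fields of struct zone. */
struct zone_defer {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* Compaction failed: remember the failing order and back off exponentially. */
static void defer_compaction(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift++;
	if (order < z->compact_order_failed)
		z->compact_order_failed = order;
	if (z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Should this request be skipped because of recent failures? */
static bool compaction_deferred(struct zone_defer *z, int order)
{
	unsigned long defer_limit = 1UL << z->compact_defer_shift;

	if (order < z->compact_order_failed)
		return false;	/* smaller orders are unaffected */
	if (++z->compact_considered >= defer_limit) {
		z->compact_considered = defer_limit;
		return false;	/* backoff expired, try again */
	}
	return true;
}

/* Compaction succeeded: clear the backoff and raise the failure threshold. */
static void compaction_defer_reset(struct zone_defer *z, int order)
{
	z->compact_considered = 0;
	z->compact_defer_shift = 0;
	if (order >= z->compact_order_failed)
		z->compact_order_failed = order + 1;
}

int main(void)
{
	/* Start the threshold high so no order has failed yet
	 * (demo simplification; the kernel initializes this differently). */
	struct zone_defer z = { .compact_order_failed = 100 };

	defer_compaction(&z, 9);
	printf("order 9 deferred: %d\n", compaction_deferred(&z, 9)); /* 1 */
	printf("order 3 deferred: %d\n", compaction_deferred(&z, 3)); /* 0 */
	compaction_defer_reset(&z, 9);
	printf("order 9 deferred after reset: %d\n",
	       compaction_deferred(&z, 9));                           /* 0 */
	return 0;
}
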
291 * released. It is always pointless to compact pages of such order (if they are
600 unsigned int order;
631 const unsigned int order = compound_order(page);
633 if (blockpfn + (1UL << order) <= end_pfn) {
634 blockpfn += (1UL << order) - 1;
635 page += (1UL << order) - 1;
636 nr_scanned += (1UL << order) - 1;
655 /* Found a free page, will break it into order-0 pages */
656 order = buddy_order(page);
657 isolated = __isolate_free_page(page, order);
660 set_page_private(page, order);
665 list_add_tail(&page->lru, &freelist[order]);
730 int order;
732 for (order = 0; order < NR_PAGE_ORDERS; order++)
733 INIT_LIST_HEAD(&cc->freepages[order]);
749 * is more than pageblock order. In this case, we adjust
776 * pageblock_nr_pages for some non-negative n. (Max order
826 * folio order and compaction target order
827 * @order: to-be-isolated folio order
828 * @target_order: compaction target order
832 static bool skip_isolation_on_order(int order, int target_order)
837 * target order: we wouldn't be here if we'd have a free folio with
841 if (!is_via_compact_memory(target_order) && order >= target_order)
847 return order >= pageblock_order;
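
Aside (not part of the listing): the two conditions listed at lines 841 and 847 are enough to restate skip_isolation_on_order() on its own: a folio already at least as large as the target order is not worth isolating, and during full compaction anything pageblock-sized or larger is skipped. A user-space sketch with pageblock_order assumed to be 9 and is_via_compact_memory() stubbed in:

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9	/* assumed: typical pageblock order */

static inline bool is_via_compact_memory(int order)
{
	return order == -1;	/* -1 marks proactive/full compaction */
}

/*
 * Skip isolating a folio whose order already satisfies the target order;
 * for full compaction (target == -1), skip anything pageblock-sized or
 * larger, since moving it cannot reduce fragmentation further.
 */
static bool skip_isolation_on_order(int order, int target_order)
{
	if (!is_via_compact_memory(target_order) && order >= target_order)
		return true;

	return order >= PAGEBLOCK_ORDER;
}

int main(void)
{
	printf("%d\n", skip_isolation_on_order(9, 4));	/* 1: folio >= target */
	printf("%d\n", skip_isolation_on_order(2, 4));	/* 0: worth moving */
	printf("%d\n", skip_isolation_on_order(9, -1));	/* 1: full compaction, pageblock-sized */
	return 0;
}
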
911 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
921 * previous order-aligned block, and did not skip it due
929 * We failed to isolate in the previous order-aligned
932 * next_skip_pfn by 1 << order, as low_pfn might have
934 * a compound or a high-order buddy page in the
937 next_skip_pfn = block_end_pfn(low_pfn, cc->order);
984 * bigger than its order. THPs and other compound pages
988 const unsigned int order = compound_order(page);
990 if (order <= MAX_PAGE_ORDER) {
991 low_pfn += (1UL << order) - 1;
992 nr_scanned += (1UL << order) - 1;
1036 * Skip if free. We read page order here without zone lock
1046 * a valid page order. Consider only values in the
1047 * valid order range to prevent low_pfn overflow.
1065 const unsigned int order = compound_order(page);
1067 /* Skip based on page order and compaction target order. */
1068 if (skip_isolation_on_order(order, cc->order)) {
1069 if (order <= MAX_PAGE_ORDER) {
1070 low_pfn += (1UL << order) - 1;
1071 nr_scanned += (1UL << order) - 1;
1217 * Check LRU folio order under the lock
1220 cc->order) &&
1274 * instead of migrating, as we cannot form the cc->order buddy
1293 next_skip_pfn += 1UL << cc->order;
1418 int order = cc->order > 0 ? cc->order : pageblock_order;
1423 * pageblock, so it's not worth to check order for valid range.
1425 if (buddy_order_unsafe(page) >= order)
1521 static int next_search_order(struct compact_control *cc, int order)
1523 order--;
1524 if (order < 0)
1525 order = cc->order - 1;
1528 if (order == cc->search_order) {
1531 cc->search_order = cc->order - 1;
1535 return order;
1547 int order;
1549 /* Full compaction passes in a negative order */
1550 if (cc->order <= 0)
1574 * Search starts from the last successful isolation order or the next
1575 * order to search after a previous failure
1577 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1579 for (order = cc->search_order;
1580 !page && order >= 0;
1581 order = next_search_order(cc, order)) {
1582 struct free_area *area = &cc->zone->free_area[order];
1607 cc->search_order = order;
1636 if (__isolate_free_page(page, order)) {
1637 set_page_private(page, order);
1638 nr_isolated = 1 << order;
1642 list_add_tail(&page->lru, &cc->freepages[order]);
1646 order = cc->search_order + 1;
1658 * Smaller scan on next order so the total scan is related
1835 int order = folio_order(src);
1842 for (start_order = order; start_order < NR_PAGE_ORDERS; start_order++)
1861 while (start_order > order) {
1870 post_alloc_hook(&dst->page, order, __GFP_MOVABLE);
1871 if (order)
1872 prep_compound_page(&dst->page, order);
1873 cc->nr_freepages -= 1 << order;
1874 cc->nr_migratepages -= 1 << order;
1891 int order = folio_order(dst);
1895 free_pages_prepare(page, order);
1896 list_add(&dst->lru, &cc->freepages[order]);
1897 cc->nr_freepages += 1 << order;
1899 cc->nr_migratepages += 1 << order;
1963 int order;
1990 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
2013 for (order = cc->order - 1;
2014 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
2015 order--) {
2016 struct free_area *area = &cc->zone->free_area[order];
2273 unsigned int order;
2316 if (is_via_compact_memory(cc->order))
2330 for (order = cc->order; order < NR_PAGE_ORDERS; order++) {
2331 struct free_area *area = &cc->zone->free_area[order];
2348 if (find_suitable_fallback(area, order, migratetype,
2373 trace_mm_compaction_finished(cc->zone, cc->order, ret);
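
Aside (not part of the listing): the loop at lines 2330-2348 is the success test of __compact_finished(): scan every order from the target upward and stop once a free block the request can actually use exists. A toy model of that scan, with migratetypes reduced to an enum and find_suitable_fallback() replaced by an exact-type check:

#include <stdbool.h>
#include <stdio.h>

#define NR_PAGE_ORDERS 11	/* assumed: MAX_PAGE_ORDER + 1 on a default config */

enum migratetype { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, MT_TYPES };

/* Toy zone: how many free blocks exist per (order, migratetype). */
struct toy_zone {
	unsigned long nr_free[NR_PAGE_ORDERS][MT_TYPES];
};

/*
 * Sketch of the success test: compaction is done once some order >= the
 * target order has a free block of a usable migratetype (the real code
 * also accepts suitable fallback types).
 */
static bool compact_finished_success(struct toy_zone *z, int target_order,
				     enum migratetype mt)
{
	for (int order = target_order; order < NR_PAGE_ORDERS; order++)
		if (z->nr_free[order][mt])
			return true;
	return false;
}

int main(void)
{
	struct toy_zone z = { 0 };

	z.nr_free[5][MT_MOVABLE] = 3;	/* a few order-5 movable blocks */
	printf("order 4 done: %d\n", compact_finished_success(&z, 4, MT_MOVABLE));
	printf("order 9 done: %d\n", compact_finished_success(&z, 9, MT_MOVABLE));
	return 0;
}
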
2380 static bool __compaction_suitable(struct zone *zone, int order,
2386 * Watermarks for order-0 must be met for compaction to be able to
2399 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
2401 watermark += compact_gap(order);
2409 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx)
2414 suitable = __compaction_suitable(zone, order, highest_zoneidx,
2421 * watermarks, but we already failed the high-order watermark check
2434 if (order > PAGE_ALLOC_COSTLY_ORDER) {
2435 int fragindex = fragmentation_index(zone, order);
2447 trace_mm_compaction_suitable(zone, order, compact_result);
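
Aside (not part of the listing): lines 2386-2401 show the core of __compaction_suitable(): the zone must pass an order-0 watermark check with compact_gap(order) of extra headroom, so there is room both for the pages being migrated and for their destinations. A reduced sketch of that check, with the zone shrunk to three numbers (the real code goes through __zone_watermark_ok() with CMA and zone-index handling):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3

/* Same formula as the kernel's compact_gap(): twice the request size. */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

/*
 * Costly orders are measured against the low watermark, cheaper ones
 * against min, and compact_gap() is added on top in both cases.
 */
static bool compaction_has_room(unsigned long free_pages,
				unsigned long min_wmark,
				unsigned long low_wmark,
				unsigned int order)
{
	unsigned long watermark;

	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? low_wmark : min_wmark;
	watermark += compact_gap(order);

	return free_pages > watermark;
}

int main(void)
{
	/* Hypothetical zone: 2000 free pages, min=1000, low=1250. */
	printf("order 4: %d\n", compaction_has_room(2000, 1000, 1250, 4)); /* 1 */
	printf("order 9: %d\n", compaction_has_room(2000, 1000, 1250, 9)); /* 0 */
	return 0;
}
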
2452 bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
2468 * want to trash just for a single high order allocation which
2472 available = zone_reclaimable_pages(zone) / order;
2474 if (__compaction_suitable(zone, order, ac->highest_zoneidx,
2483 * Should we do compaction for target allocation order.
2484 * Return COMPACT_SUCCESS if allocation for target order can be already
2486 * Return COMPACT_SKIPPED if compaction for target order is likely to fail
2487 * Return COMPACT_CONTINUE if compaction for target order should be ran
2490 compaction_suit_allocation_order(struct zone *zone, unsigned int order,
2496 if (zone_watermark_ok(zone, order, watermark, highest_zoneidx,
2500 if (!compaction_suitable(zone, order, highest_zoneidx))
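
Aside (not part of the listing): the three outcomes documented at lines 2483-2487 form a simple decision ladder in compaction_suit_allocation_order(): already above the watermark means COMPACT_SUCCESS, a zone without the order-0 headroom compaction needs means COMPACT_SKIPPED, otherwise COMPACT_CONTINUE. A sketch of that ladder with the two checks abstracted into booleans (result names follow the kernel's enum compact_result; the values here are a stand-in):

#include <stdbool.h>
#include <stdio.h>

enum compact_result { COMPACT_SKIPPED, COMPACT_CONTINUE, COMPACT_SUCCESS };

/* If the allocation would already succeed, report success without
 * compacting; if compaction cannot help, skip; otherwise go ahead. */
static enum compact_result
suit_allocation_order(bool watermark_already_ok, bool compaction_suitable)
{
	if (watermark_already_ok)
		return COMPACT_SUCCESS;
	if (!compaction_suitable)
		return COMPACT_SKIPPED;
	return COMPACT_CONTINUE;
}

int main(void)
{
	printf("%d\n", suit_allocation_order(true, true));   /* COMPACT_SUCCESS */
	printf("%d\n", suit_allocation_order(false, false)); /* COMPACT_SKIPPED */
	printf("%d\n", suit_allocation_order(false, true));  /* COMPACT_CONTINUE */
	return 0;
}
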
2516 int order;
2526 for (order = 0; order < NR_PAGE_ORDERS; order++)
2527 INIT_LIST_HEAD(&cc->freepages[order]);
2532 if (!is_via_compact_memory(cc->order)) {
2533 ret = compaction_suit_allocation_order(cc->zone, cc->order,
2544 if (compaction_restarting(cc->zone, cc->order))
2626 * previous cc->order aligned block.
2680 if (cc->order == COMPACTION_HPAGE_ORDER)
2696 * cc->order aligned block where we migrated from? If yes,
2701 if (cc->order > 0 && last_migrated_pfn) {
2703 block_start_pfn(cc->migrate_pfn, cc->order);
2743 static enum compact_result compact_zone_order(struct zone *zone, int order,
2750 .order = order,
2751 .search_order = order,
2798 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2800 * @order: The order of the current allocation
2808 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
2819 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
2832 && compaction_deferred(zone, order)) {
2837 status = compact_zone_order(zone, order, gfp_mask, prio,
2849 compaction_defer_reset(zone, order, false);
2861 defer_compaction(zone, order);
2892 .order = -1,
3050 * order is allocatable.
3055 .order = pgdat->kcompactd_max_order,
3064 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
3075 if (compaction_deferred(zone, cc.order))
3079 cc.order, zoneid, ALLOC_WMARK_MIN);
3090 compaction_defer_reset(zone, cc.order, false);
3095 * order >= cc.order. This is ratelimited by the
3104 defer_compaction(zone, cc.order);
3115 * the requested order/highest_zoneidx in case it was higher/tighter
3118 if (pgdat->kcompactd_max_order <= cc.order)
3124 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)
3126 if (!order)
3129 if (pgdat->kcompactd_max_order < order)
3130 pgdat->kcompactd_max_order = order;
3145 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,