Lines matching refs: order
218 static void __free_pages_ok(struct page *page, unsigned int order,
293 static bool page_contains_unaccepted(struct page *page, unsigned int order);
294 static bool cond_accept_memory(struct zone *zone, unsigned int order,
320 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
322 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
330 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
658 static inline unsigned int order_to_pindex(int migratetype, int order) in order_to_pindex() argument
663 if (order > PAGE_ALLOC_COSTLY_ORDER) { in order_to_pindex()
664 VM_BUG_ON(order != HPAGE_PMD_ORDER); in order_to_pindex()
671 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in order_to_pindex()
674 return (MIGRATE_PCPTYPES * order) + migratetype; in order_to_pindex()
679 int order = pindex / MIGRATE_PCPTYPES; in pindex_to_order() local
683 order = HPAGE_PMD_ORDER; in pindex_to_order()
685 VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER); in pindex_to_order()
688 return order; in pindex_to_order()
691 static inline bool pcp_allowed_order(unsigned int order) in pcp_allowed_order() argument
693 if (order <= PAGE_ALLOC_COSTLY_ORDER) in pcp_allowed_order()
696 if (order == HPAGE_PMD_ORDER) in pcp_allowed_order()
714 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
717 int nr_pages = 1 << order; in prep_compound_page()
723 prep_compound_head(page, order); in prep_compound_page()
726 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
728 set_page_private(page, order); in set_buddy_order()
745 int order, int migratetype) in compaction_capture() argument
747 if (!capc || order != capc->cc->order) in compaction_capture()
762 if (order < pageblock_order && migratetype == MIGRATE_MOVABLE && in compaction_capture()
767 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, in compaction_capture()
782 int order, int migratetype) in compaction_capture() argument
807 unsigned int order, int migratetype, in __add_to_free_list() argument
810 struct free_area *area = &zone->free_area[order]; in __add_to_free_list()
811 int nr_pages = 1 << order; in __add_to_free_list()
823 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) in __add_to_free_list()
833 unsigned int order, int old_mt, int new_mt) in move_to_free_list() argument
835 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
836 int nr_pages = 1 << order; in move_to_free_list()
848 if (order >= pageblock_order && in move_to_free_list()
857 unsigned int order, int migratetype) in __del_page_from_free_list() argument
859 int nr_pages = 1 << order; in __del_page_from_free_list()
872 zone->free_area[order].nr_free--; in __del_page_from_free_list()
874 if (order >= pageblock_order && !is_migrate_isolate(migratetype)) in __del_page_from_free_list()
879 unsigned int order, int migratetype) in del_page_from_free_list() argument
881 __del_page_from_free_list(page, zone, order, migratetype); in del_page_from_free_list()
882 account_freepages(zone, -(1 << order), migratetype); in del_page_from_free_list()
902 struct page *page, unsigned int order) in buddy_merge_likely() argument
907 if (order >= MAX_PAGE_ORDER - 1) in buddy_merge_likely()
913 return find_buddy_page_pfn(higher_page, higher_page_pfn, order + 1, in buddy_merge_likely()
954 struct zone *zone, unsigned int order, in __free_one_page() argument
967 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
970 account_freepages(zone, 1 << order, migratetype); in __free_one_page()
972 while (order < MAX_PAGE_ORDER) { in __free_one_page()
975 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
976 account_freepages(zone, -(1 << order), migratetype); in __free_one_page()
980 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); in __free_one_page()
984 if (unlikely(order >= pageblock_order)) { in __free_one_page()
1004 clear_page_guard(zone, buddy, order); in __free_one_page()
1006 __del_page_from_free_list(buddy, zone, order, buddy_mt); in __free_one_page()
1014 change_pageblock_range(buddy, order, migratetype); in __free_one_page()
1020 order++; in __free_one_page()
1024 set_buddy_order(page, order); in __free_one_page()
1028 else if (is_shuffle_order(order)) in __free_one_page()
1031 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
1033 __add_to_free_list(page, zone, order, migratetype, to_tail); in __free_one_page()
1037 page_reporting_notify_free(order); in __free_one_page()
1317 unsigned int order) in free_pages_prepare() argument
1327 trace_mm_page_free(page, order); in free_pages_prepare()
1328 kmsan_free_page(page, order); in free_pages_prepare()
1331 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1347 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1349 reset_page_owner(page, order); in free_pages_prepare()
1350 page_table_check_free(page, order); in free_pages_prepare()
1351 pgalloc_tag_sub(page, 1 << order); in free_pages_prepare()
1362 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1368 if (unlikely(order)) { in free_pages_prepare()
1377 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1390 mod_mthp_stat(order, MTHP_STAT_NR_ANON, -1); in free_pages_prepare()
1406 reset_page_owner(page, order); in free_pages_prepare()
1407 page_table_check_free(page, order); in free_pages_prepare()
1408 pgalloc_tag_sub(page, 1 << order); in free_pages_prepare()
1412 PAGE_SIZE << order); in free_pages_prepare()
1414 PAGE_SIZE << order); in free_pages_prepare()
1417 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1428 kasan_poison_pages(page, order, init); in free_pages_prepare()
1435 kernel_init_pages(page, 1 << order); in free_pages_prepare()
1442 arch_free_page(page, order); in free_pages_prepare()
1444 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1459 unsigned int order; in free_pcppages_bulk() local
1484 order = pindex_to_order(pindex); in free_pcppages_bulk()
1485 nr_pages = 1 << order; in free_pcppages_bulk()
1499 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1500 trace_mm_page_pcpu_drain(page, order, mt); in free_pcppages_bulk()
1509 unsigned long pfn, int order, fpi_t fpi) in split_large_buddy() argument
1511 unsigned long end = pfn + (1 << order); in split_large_buddy()
1513 VM_WARN_ON_ONCE(!IS_ALIGNED(pfn, 1 << order)); in split_large_buddy()
1517 if (order > pageblock_order) in split_large_buddy()
1518 order = pageblock_order; in split_large_buddy()
1523 __free_one_page(page, pfn, zone, order, mt, fpi); in split_large_buddy()
1524 pfn += 1 << order; in split_large_buddy()
1532 unsigned int order) in add_page_to_zone_llist() argument
1535 page->private = order; in add_page_to_zone_llist()
1541 unsigned long pfn, unsigned int order, in free_one_page() argument
1549 add_page_to_zone_llist(zone, page, order); in free_one_page()
1570 split_large_buddy(zone, page, pfn, order, fpi_flags); in free_one_page()
1573 __count_vm_events(PGFREE, 1 << order); in free_one_page()
1576 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1582 if (free_pages_prepare(page, order)) in __free_pages_ok()
1583 free_one_page(zone, page, pfn, order, fpi_flags); in __free_pages_ok()
1586 void __meminit __free_pages_core(struct page *page, unsigned int order, in __free_pages_core() argument
1589 unsigned int nr_pages = 1 << order; in __free_pages_core()
1620 if (page_contains_unaccepted(page, order)) { in __free_pages_core()
1621 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) in __free_pages_core()
1624 accept_memory(page_to_phys(page), PAGE_SIZE << order); in __free_pages_core()
1631 __free_pages_ok(page, order, FPI_TO_TAIL); in __free_pages_core()
1765 static inline bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1768 for (int i = 0; i < (1 << order); i++) { in check_new_pages()
1807 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1817 arch_alloc_page(page, order); in post_alloc_hook()
1818 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
1825 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
1838 init = !tag_clear_highpages(page, 1 << order); in post_alloc_hook()
1841 kasan_unpoison_pages(page, order, init)) { in post_alloc_hook()
1850 for (i = 0; i != 1 << order; ++i) in post_alloc_hook()
1855 kernel_init_pages(page, 1 << order); in post_alloc_hook()
1857 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1858 page_table_check_alloc(page, order); in post_alloc_hook()
1859 pgalloc_tag_add(page, current, 1 << order); in post_alloc_hook()
1862 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1865 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1867 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1868 prep_compound_page(page, order); in prep_new_page()
1887 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1895 for (current_order = order; current_order < NR_PAGE_ORDERS; ++current_order) { in __rmqueue_smallest()
1901 page_del_and_expand(zone, page, order, current_order, in __rmqueue_smallest()
1903 trace_mm_page_alloc_zone_locked(page, order, migratetype, in __rmqueue_smallest()
1904 pcp_allowed_order(order) && in __rmqueue_smallest()
1927 unsigned int order) in __rmqueue_cma_fallback() argument
1929 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1933 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
1945 unsigned int order; in __move_freepages_block() local
1962 order = buddy_order(page); in __move_freepages_block()
1964 move_to_free_list(page, zone, order, old_mt, new_mt); in __move_freepages_block()
1966 pfn += 1 << order; in __move_freepages_block()
1967 pages_moved += 1 << order; in __move_freepages_block()
2049 int order = start_pfn ? __ffs(start_pfn) : MAX_PAGE_ORDER; in find_large_buddy() local
2055 if (++order > MAX_PAGE_ORDER) in find_large_buddy()
2057 pfn &= ~0UL << order; in find_large_buddy()
2122 int order = buddy_order(buddy); in __move_freepages_block_isolate() local
2124 del_page_from_free_list(buddy, zone, order, in __move_freepages_block_isolate()
2127 split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE); in __move_freepages_block_isolate()
2203 static bool should_try_claim_block(unsigned int order, int start_mt) in should_try_claim_block() argument
2212 if (order >= pageblock_order) in should_try_claim_block()
2219 if (order >= pageblock_order / 2) in should_try_claim_block()
2251 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2256 if (claimable && !should_try_claim_block(order, migratetype)) in find_suitable_fallback()
2281 int current_order, int order, int start_type, in try_to_claim_block() argument
2293 nr_added = expand(zone, page, order, current_order, start_type); in try_to_claim_block()
2340 return __rmqueue_smallest(zone, order, start_type); in try_to_claim_block()
2355 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, in __rmqueue_claim() argument
2360 int min_order = order; in __rmqueue_claim()
2369 if (order < pageblock_order && alloc_flags & ALLOC_NOFRAGMENT) in __rmqueue_claim()
2392 page = try_to_claim_block(zone, page, current_order, order, in __rmqueue_claim()
2396 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_claim()
2410 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) in __rmqueue_steal() argument
2417 for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { in __rmqueue_steal()
2425 page_del_and_expand(zone, page, order, current_order, fallback_mt); in __rmqueue_steal()
2426 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_steal()
2446 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2460 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2477 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2483 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2491 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); in __rmqueue()
2500 page = __rmqueue_steal(zone, order, migratetype); in __rmqueue()
2515 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2530 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2826 unsigned int order, fpi_t fpi_flags, unsigned long *UP_flags) in free_frozen_page_commit() argument
2841 __count_vm_events(PGFREE, 1 << order); in free_frozen_page_commit()
2842 pindex = order_to_pindex(migratetype, order); in free_frozen_page_commit()
2844 pcp->count += 1 << order; in free_frozen_page_commit()
2853 if (order && order <= PAGE_ALLOC_COSTLY_ORDER) { in free_frozen_page_commit()
2863 pcp->free_count += (1 << order); in free_frozen_page_commit()
2929 static void __free_frozen_pages(struct page *page, unsigned int order, in __free_frozen_pages() argument
2938 if (!pcp_allowed_order(order)) { in __free_frozen_pages()
2939 __free_pages_ok(page, order, fpi_flags); in __free_frozen_pages()
2943 if (!free_pages_prepare(page, order)) in __free_frozen_pages()
2957 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
2965 add_page_to_zone_llist(zone, page, order); in __free_frozen_pages()
2971 order, fpi_flags, &UP_flags)) in __free_frozen_pages()
2975 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
2979 void free_frozen_pages(struct page *page, unsigned int order) in free_frozen_pages() argument
2981 __free_frozen_pages(page, order, FPI_NONE); in free_frozen_pages()
2998 unsigned int order = folio_order(folio); in free_unref_folios() local
3000 if (!free_pages_prepare(&folio->page, order)) in free_unref_folios()
3006 if (!pcp_allowed_order(order)) { in free_unref_folios()
3008 pfn, order, FPI_NONE); in free_unref_folios()
3011 folio->private = (void *)(unsigned long)order; in free_unref_folios()
3022 unsigned int order = (unsigned long)folio->private; in free_unref_folios() local
3043 order, FPI_NONE); in free_unref_folios()
3054 order, FPI_NONE); in free_unref_folios()
3069 migratetype, order, FPI_NONE, &UP_flags)) { in free_unref_folios()
3088 void split_page(struct page *page, unsigned int order) in split_page() argument
3095 for (i = 1; i < (1 << order); i++) in split_page()
3097 split_page_owner(page, order, 0); in split_page()
3098 pgalloc_tag_split(page_folio(page), order, 0); in split_page()
3099 split_page_memcg(page, order); in split_page()
3103 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
3116 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
3121 del_page_from_free_list(page, zone, order, mt); in __isolate_free_page()
3127 if (order >= pageblock_order - 1) { in __isolate_free_page()
3128 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3141 return 1UL << order; in __isolate_free_page()
3153 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
3161 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3193 unsigned int order, unsigned int alloc_flags, in rmqueue_buddy() argument
3208 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3212 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); in rmqueue_buddy()
3221 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3229 } while (check_new_pages(page, order)); in rmqueue_buddy()
3231 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_buddy()
3237 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) in nr_pcp_alloc() argument
3251 if (order) in nr_pcp_alloc()
3263 if (!order) { in nr_pcp_alloc()
3282 batch = max(batch >> order, 2); in nr_pcp_alloc()
3289 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
3299 int batch = nr_pcp_alloc(pcp, zone, order); in __rmqueue_pcplist()
3302 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
3306 pcp->count += alloced << order; in __rmqueue_pcplist()
3313 pcp->count -= 1 << order; in __rmqueue_pcplist()
3314 } while (check_new_pages(page, order)); in __rmqueue_pcplist()
3321 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
3340 list = &pcp->lists[order_to_pindex(migratetype, order)]; in rmqueue_pcplist()
3341 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3344 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
3364 struct zone *zone, unsigned int order, in rmqueue() argument
3370 if (likely(pcp_allowed_order(order))) { in rmqueue()
3371 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3377 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
3397 static void reserve_highatomic_pageblock(struct page *page, int order, in reserve_highatomic_pageblock() argument
3427 if (order < pageblock_order) { in reserve_highatomic_pageblock()
3432 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
3433 zone->nr_reserved_highatomic += 1 << order; in reserve_highatomic_pageblock()
3457 int order; in unreserve_highatomic_pageblock() local
3471 for (order = 0; order < NR_PAGE_ORDERS; order++) { in unreserve_highatomic_pageblock()
3472 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
3479 size = max(pageblock_nr_pages, 1UL << order); in unreserve_highatomic_pageblock()
3500 if (order < pageblock_order) in unreserve_highatomic_pageblock()
3505 move_to_free_list(page, zone, order, in unreserve_highatomic_pageblock()
3508 change_pageblock_range(page, order, in unreserve_highatomic_pageblock()
3529 unsigned int order, unsigned int alloc_flags) in __zone_watermark_unusable_free() argument
3531 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
3555 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
3563 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
3603 if (!order) in __zone_watermark_ok()
3607 for (o = order; o < NR_PAGE_ORDERS; o++) { in __zone_watermark_ok()
3633 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
3636 return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_ok()
3640 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
3652 if (!order) { in zone_watermark_fast()
3665 if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags, in zone_watermark_fast()
3675 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost in zone_watermark_fast()
3678 return __zone_watermark_ok(z, order, mark, highest_zoneidx, in zone_watermark_fast()
3761 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3846 cond_accept_memory(zone, order, alloc_flags); in get_page_from_freelist()
3859 if (zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3868 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3873 if (cond_accept_memory(zone, order, alloc_flags)) in get_page_from_freelist()
3881 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3893 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3903 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3912 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, in get_page_from_freelist()
3915 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3922 reserve_highatomic_pageblock(page, order, zone); in get_page_from_freelist()
3926 if (cond_accept_memory(zone, order, alloc_flags)) in get_page_from_freelist()
3931 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
4004 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
4010 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4017 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4023 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
4031 .order = order, in __alloc_pages_may_oom()
4055 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
4064 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
4101 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4118 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4126 if (!order) in __alloc_pages_direct_compact()
4133 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
4150 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4154 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4160 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4177 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
4188 if (!order) in should_compact_retry()
4199 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
4217 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4229 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
4238 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
4243 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4252 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
4260 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
4364 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4377 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4390 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4399 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4404 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4423 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4433 reclaim_order = max(order, pageblock_order); in wake_all_kswapds()
4435 reclaim_order = order; in wake_all_kswapds()
4449 gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) in gfp_to_alloc_flags() argument
4478 if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE)) in gfp_to_alloc_flags()
4553 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
4566 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
4600 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4602 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
4663 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4669 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
4708 alloc_flags = gfp_to_alloc_flags(gfp_mask, order); in __alloc_pages_slowpath()
4735 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4741 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4756 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4758 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4811 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4830 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4843 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4849 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4866 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4877 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4897 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4942 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); in __alloc_pages_slowpath()
4951 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
4956 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4985 should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
5174 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, in __alloc_frozen_pages_noprof() argument
5186 if (WARN_ON_ONCE_GFP(order > MAX_PAGE_ORDER, gfp)) in __alloc_frozen_pages_noprof()
5199 if (!prepare_alloc_pages(gfp, order, preferred_nid, nodemask, &ac, in __alloc_frozen_pages_noprof()
5210 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in __alloc_frozen_pages_noprof()
5223 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); in __alloc_frozen_pages_noprof()
5227 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { in __alloc_frozen_pages_noprof()
5228 free_frozen_pages(page, order); in __alloc_frozen_pages_noprof()
5232 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in __alloc_frozen_pages_noprof()
5233 kmsan_alloc_page(page, order, alloc_gfp); in __alloc_frozen_pages_noprof()
5239 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, in __alloc_pages_noprof() argument
5244 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); in __alloc_pages_noprof()
5251 struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, in __folio_alloc_noprof() argument
5254 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, in __folio_alloc_noprof()
5265 unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) in get_free_pages_noprof() argument
5269 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); in get_free_pages_noprof()
5282 static void ___free_pages(struct page *page, unsigned int order, in ___free_pages() argument
5291 __free_frozen_pages(page, order, fpi_flags); in ___free_pages()
5293 pgalloc_tag_sub_pages(tag, (1 << order) - 1); in ___free_pages()
5294 while (order-- > 0) { in ___free_pages()
5300 clear_page_tag_ref(page + (1 << order)); in ___free_pages()
5301 __free_frozen_pages(page + (1 << order), order, in ___free_pages()
5327 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5329 ___free_pages(page, order, FPI_NONE); in __free_pages()
5337 void free_pages_nolock(struct page *page, unsigned int order) in free_pages_nolock() argument
5339 ___free_pages(page, order, FPI_TRYLOCK); in free_pages_nolock()
5351 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
5355 __free_pages(virt_to_page((void *)addr), order); in free_pages()
5361 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
5369 split_page_owner(page, order, 0); in make_alloc_exact()
5370 pgalloc_tag_split(page_folio(page), order, 0); in make_alloc_exact()
5371 split_page_memcg(page, order); in make_alloc_exact()
5375 last = page + (1UL << order); in make_alloc_exact()
5399 unsigned int order = get_order(size); in alloc_pages_exact_noprof() local
5405 addr = get_free_pages_noprof(gfp_mask, order); in alloc_pages_exact_noprof()
5406 return make_alloc_exact(addr, order, size); in alloc_pages_exact_noprof()
5424 unsigned int order = get_order(size); in alloc_pages_exact_nid_noprof() local
5430 p = alloc_pages_node_noprof(nid, gfp_mask, order); in alloc_pages_exact_nid_noprof()
5433 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid_noprof()
6857 int order; in split_free_pages() local
6859 for (order = 0; order < NR_PAGE_ORDERS; order++) { in split_free_pages()
6861 int nr_pages = 1 << order; in split_free_pages()
6863 list_for_each_entry_safe(page, next, &list[order], lru) { in split_free_pages()
6866 post_alloc_hook(page, order, gfp_mask); in split_free_pages()
6868 if (!order) in split_free_pages()
6871 split_page(page, order); in split_free_pages()
6938 const unsigned int order = ilog2(end - start); in alloc_contig_range_noprof() local
6944 .order = -1, in alloc_contig_range_noprof()
6961 if (WARN_ON_ONCE((gfp_mask & __GFP_COMP) && order > MAX_FOLIO_ORDER)) in alloc_contig_range_noprof()
7062 check_new_pages(head, order); in alloc_contig_range_noprof()
7063 prep_new_page(head, order, gfp_mask, 0); in alloc_contig_range_noprof()
7260 unsigned int order; in __offline_isolated_pages() local
7290 order = buddy_order(page); in __offline_isolated_pages()
7291 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); in __offline_isolated_pages()
7292 pfn += (1 << order); in __offline_isolated_pages()
7306 unsigned int order; in is_free_buddy_page() local
7308 for (order = 0; order < NR_PAGE_ORDERS; order++) { in is_free_buddy_page()
7309 const struct page *head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
7312 buddy_order_unsafe(head) >= order) in is_free_buddy_page()
7316 return order <= MAX_PAGE_ORDER; in is_free_buddy_page()
7322 unsigned int order, int migratetype, in add_to_free_list() argument
7325 __add_to_free_list(page, zone, order, migratetype, tail); in add_to_free_list()
7326 account_freepages(zone, 1 << order, migratetype); in add_to_free_list()
7367 unsigned int order; in take_page_off_buddy() local
7371 for (order = 0; order < NR_PAGE_ORDERS; order++) { in take_page_off_buddy()
7372 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
7375 if (PageBuddy(page_head) && page_order >= order) { in take_page_off_buddy()
7454 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
7458 return range_contains_unaccepted_memory(start, PAGE_SIZE << order); in page_contains_unaccepted()
7509 static bool cond_accept_memory(struct zone *zone, unsigned int order, in cond_accept_memory() argument
7535 __zone_watermark_unusable_free(zone, order, 0) - in cond_accept_memory()
7568 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
7573 static bool cond_accept_memory(struct zone *zone, unsigned int order, in cond_accept_memory() argument
7587 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) in alloc_frozen_pages_nolock_noprof() argument
7629 if (!pcp_allowed_order(order)) in alloc_frozen_pages_nolock_noprof()
7639 prepare_alloc_pages(alloc_gfp, order, nid, NULL, &ac, in alloc_frozen_pages_nolock_noprof()
7646 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in alloc_frozen_pages_nolock_noprof()
7651 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { in alloc_frozen_pages_nolock_noprof()
7652 __free_frozen_pages(page, order, FPI_TRYLOCK); in alloc_frozen_pages_nolock_noprof()
7655 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in alloc_frozen_pages_nolock_noprof()
7656 kmsan_alloc_page(page, order, alloc_gfp); in alloc_frozen_pages_nolock_noprof()
7675 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) in alloc_pages_nolock_noprof() argument
7679 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order); in alloc_pages_nolock_noprof()
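
The matches above, which appear to come from the buddy-allocator paths in mm/page_alloc.c, all revolve around the same order arithmetic: an order-N block is 2^N contiguous pages, its buddy sits at pfn ^ (1 << N), and small orders are cached on per-cpu lists indexed by (migratetype, order). The standalone C sketch below is not kernel code; it only illustrates that arithmetic. The constant values (MIGRATE_PCPTYPES = 3, PAGE_ALLOC_COSTLY_ORDER = 3) are assumed typical values, and order_to_pindex() here mirrors only the simple non-THP branch visible in the order_to_pindex() match.

	/*
	 * Standalone userspace illustration of the "order" arithmetic seen in
	 * the listing above.  Constants are assumptions mirroring a typical
	 * configuration, not taken from any particular kernel .config.
	 */
	#include <stdio.h>

	#define PAGE_ALLOC_COSTLY_ORDER 3   /* larger orders normally bypass the pcp lists */
	#define MIGRATE_PCPTYPES        3   /* unmovable, movable, reclaimable */

	/* An order-N allocation covers 2^N contiguous pages. */
	static unsigned long nr_pages(unsigned int order)
	{
		return 1UL << order;
	}

	/*
	 * The buddy of an order-N block at pfn lives at pfn ^ (1 << N): flipping
	 * bit N toggles between the two halves of the enclosing order-(N+1)
	 * block, which is what the merge loop in __free_one_page() relies on.
	 */
	static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
	{
		return pfn ^ (1UL << order);
	}

	/*
	 * Per-cpu page lists are indexed by (migratetype, order) pairs, as in
	 * the order_to_pindex()/pindex_to_order() matches above (THP special
	 * case omitted here).
	 */
	static int order_to_pindex(int migratetype, unsigned int order)
	{
		return MIGRATE_PCPTYPES * order + migratetype;
	}

	int main(void)
	{
		unsigned int order = 2;        /* 4 pages, i.e. 16 KiB with 4 KiB pages */
		unsigned long pfn = 0x1000;

		printf("order %u -> %lu pages\n", order, nr_pages(order));
		printf("buddy of pfn 0x%lx at order %u is 0x%lx\n",
		       pfn, order, buddy_pfn(pfn, order));
		printf("pcp list index for (movable=1, order=%u) is %d\n",
		       order, order_to_pindex(1, order));
		return 0;
	}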