Lines matching the full word "page" in mm/page_alloc.c
13 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
62 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
69 * Skip free page reporting notification for the (possibly merged) page.
70 * This does not hinder free page reporting from grabbing the page,
72 * the free page reporting infrastructure about a newly freed page. For
73 * example, used when temporarily pulling a page from a freelist and
79 * Place the (possibly merged) page to the tail of the freelist. Will ignore
80 * page shuffling (relevant code - e.g., memory onlining - is expected to
85 * (memory onlining) or untouched pages (page isolation, free page
90 /* Free the page without taking locks. Rely on trylock only. */
244 static void __free_pages_ok(struct page *page, unsigned int order,
319 static bool page_contains_unaccepted(struct page *page, unsigned int order);
322 static bool __free_unaccepted(struct page *page);
363 static inline unsigned long *get_pageblock_bitmap(const struct page *page, in get_pageblock_bitmap() argument
369 return page_zone(page)->pageblock_flags; in get_pageblock_bitmap()
373 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) in pfn_to_bitidx() argument
378 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); in pfn_to_bitidx()
389 get_pfnblock_bitmap_bitidx(const struct page *page, unsigned long pfn, in get_pfnblock_bitmap_bitidx() argument
401 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); in get_pfnblock_bitmap_bitidx()
403 bitmap = get_pageblock_bitmap(page, pfn); in get_pfnblock_bitmap_bitidx()
404 *bitidx = pfn_to_bitidx(page, pfn); in get_pfnblock_bitmap_bitidx()
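The two helpers above implement the pageblock flag lookup: each pageblock (2^pageblock_order pages) owns NR_PAGEBLOCK_BITS bits in a zone-wide bitmap. A minimal userspace model of that index arithmetic follows (assuming the typical pageblock_order of 9 and 4 flag bits per block; the real pfn_to_bitidx() also rounds zone_start_pfn down to a pageblock boundary first, and handles SPARSEMEM differently):

#include <stdio.h>

#define PAGEBLOCK_ORDER    9                      /* assumed typical value */
#define NR_PAGEBLOCK_BITS  4
#define BITS_PER_LONG      (8 * sizeof(unsigned long))

/* Mirrors pfn_to_bitidx()/get_pfnblock_bitmap_bitidx(): map a pfn to
 * the bitmap word and bit offset holding its pageblock's flags. */
static void pfn_to_word_and_bitidx(unsigned long pfn,
				   unsigned long zone_start_pfn,
				   unsigned long *word, unsigned long *bitidx)
{
	unsigned long idx;

	pfn -= zone_start_pfn;                    /* zone-relative pfn */
	idx = (pfn >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
	*word = idx / BITS_PER_LONG;              /* which unsigned long */
	*bitidx = idx % BITS_PER_LONG;            /* bit offset within it */
}

int main(void)
{
	unsigned long word, bitidx;

	pfn_to_word_and_bitidx(0x12345, 0x10000, &word, &bitidx);
	printf("word=%lu bitidx=%lu\n", word, bitidx);   /* word=1 bitidx=4 */
	return 0;
}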
414 * @page: The page within the block of interest
415 * @pfn: The target page frame number
420 static unsigned long __get_pfnblock_flags_mask(const struct page *page, in __get_pfnblock_flags_mask() argument
428 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); in __get_pfnblock_flags_mask()
440 * @page: The page within the block of interest
441 * @pfn: The target page frame number
446 bool get_pfnblock_bit(const struct page *page, unsigned long pfn, in get_pfnblock_bit() argument
455 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); in get_pfnblock_bit()
462 * @page: The page within the block of interest
463 * @pfn: The target page frame number
467 * Use get_pfnblock_migratetype() if caller already has both @page and @pfn
471 get_pfnblock_migratetype(const struct page *page, unsigned long pfn) in get_pfnblock_migratetype() argument
476 flags = __get_pfnblock_flags_mask(page, pfn, mask); in get_pfnblock_migratetype()
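Reading the migratetype then reduces to a shift-and-mask on that bitmap word, as __get_pfnblock_flags_mask() does. A sketch, assuming the migratetype occupies the low three of the four pageblock bits (the kernel keeps the isolate flag in the fourth):

#include <stdio.h>

#define MIGRATETYPE_MASK 0x7UL            /* assumption: low 3 pageblock bits */

/* Mirrors __get_pfnblock_flags_mask(): extract the flag bits at bitidx. */
static unsigned long get_flags_mask(unsigned long bitmap_word,
				    unsigned long bitidx, unsigned long mask)
{
	return (bitmap_word >> bitidx) & mask;
}

int main(void)
{
	unsigned long word = 2UL << 4;        /* migratetype 2 stored at bitidx 4 */

	printf("migratetype = %lu\n", get_flags_mask(word, 4, MIGRATETYPE_MASK));
	return 0;
}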
488 * @page: The page within the block of interest
489 * @pfn: The target page frame number
493 static void __set_pfnblock_flags_mask(struct page *page, unsigned long pfn, in __set_pfnblock_flags_mask() argument
500 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); in __set_pfnblock_flags_mask()
512 * @page: The page within the block of interest
513 * @pfn: The target page frame number
516 void set_pfnblock_bit(const struct page *page, unsigned long pfn, in set_pfnblock_bit() argument
525 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); in set_pfnblock_bit()
532 * @page: The page within the block of interest
533 * @pfn: The target page frame number
536 void clear_pfnblock_bit(const struct page *page, unsigned long pfn, in clear_pfnblock_bit() argument
545 get_pfnblock_bitmap_bitidx(page, pfn, &bitmap_word, &bitidx); in clear_pfnblock_bit()
552 * @page: The page within the block of interest
555 static void set_pageblock_migratetype(struct page *page, in set_pageblock_migratetype() argument
568 VM_WARN_ONCE(get_pageblock_isolate(page), in set_pageblock_migratetype()
572 __set_pfnblock_flags_mask(page, page_to_pfn(page), in set_pageblock_migratetype()
577 void __meminit init_pageblock_migratetype(struct page *page, in init_pageblock_migratetype() argument
599 __set_pfnblock_flags_mask(page, page_to_pfn(page), flags, in init_pageblock_migratetype()
604 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
608 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
619 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
629 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
631 if (page_outside_zone_boundaries(zone, page)) in bad_range()
633 if (zone != page_zone(page)) in bad_range()
639 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
645 static void bad_page(struct page *page, const char *reason) in bad_page() argument
662 "BUG: Bad page state: %lu messages suppressed\n", in bad_page()
671 pr_alert("BUG: Bad page state in process %s pfn:%05lx\n", in bad_page()
672 current->comm, page_to_pfn(page)); in bad_page()
673 dump_page(page, reason); in bad_page()
679 if (PageBuddy(page)) in bad_page()
680 __ClearPageBuddy(page); in bad_page()
731 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
734 * in bit 0 of page->compound_head. The rest of the bits are a pointer to the head page.
736 * The first tail page's ->compound_order holds the order of allocation.
740 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
745 __SetPageHead(page); in prep_compound_page()
747 prep_compound_tail(page, i); in prep_compound_page()
749 prep_compound_head(page, order); in prep_compound_page()
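The compound_head encoding described in the comment block above can be modeled in a few lines of userspace C: tail pages carry the head pointer with bit 0 set, so decoding is one test and one subtraction. This is a sketch of the logic only; the real compound_head() lives in include/linux/page-flags.h and uses READ_ONCE():

#include <stdio.h>

struct page { unsigned long compound_head; };

/* Tail pages store the head pointer with bit 0 set (the PageTail marker). */
static void set_compound_head(struct page *tail, struct page *head)
{
	tail->compound_head = (unsigned long)head | 1;
}

/* Models compound_head(): decode the pointer if bit 0 is set. */
static struct page *model_compound_head(struct page *page)
{
	unsigned long head = page->compound_head;

	if (head & 1)
		return (struct page *)(head - 1);
	return page;                          /* head (or non-compound) page */
}

int main(void)
{
	struct page pages[4] = { { 0 } };     /* model an order-2 compound page */

	for (int i = 1; i < 4; i++)
		set_compound_head(&pages[i], &pages[0]);
	printf("tail[2] resolves to head: %d\n",
	       model_compound_head(&pages[2]) == &pages[0]);
	return 0;
}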
752 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
754 set_page_private(page, order); in set_buddy_order()
755 __SetPageBuddy(page); in set_buddy_order()
765 !capc->page && in task_capc()
770 compaction_capture(struct capture_control *capc, struct page *page, in compaction_capture() argument
786 * have trouble finding a high-order free page. in compaction_capture()
793 trace_mm_page_alloc_extfrag(page, capc->cc->order, order, in compaction_capture()
796 capc->page = page; in compaction_capture()
807 compaction_capture(struct capture_control *capc, struct page *page, in compaction_capture() argument
832 static inline void __add_to_free_list(struct page *page, struct zone *zone, in __add_to_free_list() argument
839 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, in __add_to_free_list()
840 "page type is %d, passed migratetype is %d (nr=%d)\n", in __add_to_free_list()
841 get_pageblock_migratetype(page), migratetype, nr_pages); in __add_to_free_list()
844 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); in __add_to_free_list()
846 list_add(&page->buddy_list, &area->free_list[migratetype]); in __add_to_free_list()
858 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
864 /* Free page moving can fail, so it happens before the type update */ in move_to_free_list()
865 VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, in move_to_free_list()
866 "page type is %d, passed migratetype is %d (nr=%d)\n", in move_to_free_list()
867 get_pageblock_migratetype(page), old_mt, nr_pages); in move_to_free_list()
869 list_move_tail(&page->buddy_list, &area->free_list[new_mt]); in move_to_free_list()
882 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, in __del_page_from_free_list() argument
887 VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, in __del_page_from_free_list()
888 "page type is %d, passed migratetype is %d (nr=%d)\n", in __del_page_from_free_list()
889 get_pageblock_migratetype(page), migratetype, nr_pages); in __del_page_from_free_list()
891 /* clear reported state and update reported page count */ in __del_page_from_free_list()
892 if (page_reported(page)) in __del_page_from_free_list()
893 __ClearPageReported(page); in __del_page_from_free_list()
895 list_del(&page->buddy_list); in __del_page_from_free_list()
896 __ClearPageBuddy(page); in __del_page_from_free_list()
897 set_page_private(page, 0); in __del_page_from_free_list()
904 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
907 __del_page_from_free_list(page, zone, order, migratetype); in del_page_from_free_list()
911 static inline struct page *get_page_from_free_area(struct free_area *area, in get_page_from_free_area()
915 struct page, buddy_list); in get_page_from_free_area()
919 * If this is less than the 2nd largest possible page, check if the buddy
922 * that is happening, add the free page to the tail of the list
924 * as a 2-level higher order page
928 struct page *page, unsigned int order) in buddy_merge_likely() argument
931 struct page *higher_page; in buddy_merge_likely()
937 higher_page = page + (higher_page_pfn - pfn); in buddy_merge_likely()
943 static void change_pageblock_range(struct page *pageblock_page, in change_pageblock_range()
968 * The page's order is recorded in the page_private(page) field.
978 static inline void __free_one_page(struct page *page, in __free_one_page() argument
986 struct page *buddy; in __free_one_page()
990 VM_BUG_ON_PAGE(page->flags.f & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
993 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
994 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
1001 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
1006 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); in __free_one_page()
1026 * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page, in __free_one_page()
1044 page = page + (combined_pfn - pfn); in __free_one_page()
1050 set_buddy_order(page, order); in __free_one_page()
1057 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
1059 __add_to_free_list(page, zone, order, migratetype, to_tail); in __free_one_page()
1061 /* Notify page reporting subsystem of freed page */ in __free_one_page()
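The merge loop in __free_one_page() rests on one identity: a block's buddy differs from it only in the bit that selects which half of the next-higher-order block it occupies. Hence buddy_pfn = pfn ^ (1 << order), and the merged block starts at combined_pfn = pfn & buddy_pfn, which is what the "page = page + (combined_pfn - pfn)" step applies. A runnable demonstration (assuming, for illustration, that every buddy happens to be free):

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x1400;           /* freshly freed order-2 block */
	unsigned int order = 2;

	while (order < 4) {                   /* pretend each buddy is free */
		unsigned long buddy_pfn = pfn ^ (1UL << order);
		unsigned long combined_pfn = pfn & buddy_pfn;

		printf("order %u: pfn=%#lx buddy=%#lx -> order %u at %#lx\n",
		       order, pfn, buddy_pfn, order + 1, combined_pfn);
		pfn = combined_pfn;           /* move to the merged block */
		order++;
	}
	return 0;
}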
1067 * A bad page could be due to a number of fields. Instead of multiple branches,
1071 static inline bool page_expected_state(struct page *page, in page_expected_state() argument
1074 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_expected_state()
1077 if (unlikely((unsigned long)page->mapping | in page_expected_state()
1078 page_ref_count(page) | in page_expected_state()
1080 page->memcg_data | in page_expected_state()
1082 page_pool_page_is_pp(page) | in page_expected_state()
1083 (page->flags.f & check_flags))) in page_expected_state()
1089 static const char *page_bad_reason(struct page *page, unsigned long flags) in page_bad_reason() argument
1093 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_bad_reason()
1095 if (unlikely(page->mapping != NULL)) in page_bad_reason()
1097 if (unlikely(page_ref_count(page) != 0)) in page_bad_reason()
1099 if (unlikely(page->flags.f & flags)) { in page_bad_reason()
1106 if (unlikely(page->memcg_data)) in page_bad_reason()
1107 bad_reason = "page still charged to cgroup"; in page_bad_reason()
1109 if (unlikely(page_pool_page_is_pp(page))) in page_bad_reason()
1114 static inline bool free_page_is_bad(struct page *page) in free_page_is_bad() argument
1116 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) in free_page_is_bad()
1120 bad_page(page, page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); in free_page_is_bad()
1129 static int free_tail_page_prepare(struct page *head_page, struct page *page) in free_tail_page_prepare() argument
1135 * We rely on page->lru.next never having bit 0 set, unless the page in free_tail_page_prepare()
1144 switch (page - head_page) { in free_tail_page_prepare()
1146 /* the first tail page: these may be in place of ->mapping */ in free_tail_page_prepare()
1148 bad_page(page, "nonzero large_mapcount"); in free_tail_page_prepare()
1153 bad_page(page, "nonzero nr_pages_mapped"); in free_tail_page_prepare()
1158 bad_page(page, "nonzero mm mapcount 0"); in free_tail_page_prepare()
1162 bad_page(page, "nonzero mm mapcount 1"); in free_tail_page_prepare()
1168 bad_page(page, "nonzero entire_mapcount"); in free_tail_page_prepare()
1172 bad_page(page, "nonzero pincount"); in free_tail_page_prepare()
1178 /* the second tail page: deferred_list overlaps ->mapping */ in free_tail_page_prepare()
1180 bad_page(page, "on deferred list"); in free_tail_page_prepare()
1185 bad_page(page, "nonzero entire_mapcount"); in free_tail_page_prepare()
1189 bad_page(page, "nonzero pincount"); in free_tail_page_prepare()
1195 /* the third tail page: hugetlb specifics overlap ->mappings */ in free_tail_page_prepare()
1200 if (page->mapping != TAIL_MAPPING) { in free_tail_page_prepare()
1201 bad_page(page, "corrupted mapping in tail page"); in free_tail_page_prepare()
1206 if (unlikely(!PageTail(page))) { in free_tail_page_prepare()
1207 bad_page(page, "PageTail not set"); in free_tail_page_prepare()
1210 if (unlikely(compound_head(page) != head_page)) { in free_tail_page_prepare()
1211 bad_page(page, "compound_head not consistent"); in free_tail_page_prepare()
1216 page->mapping = NULL; in free_tail_page_prepare()
1217 clear_compound_head(page); in free_tail_page_prepare()
1226 * using page tags instead (see below).
1227 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1228 * that error detection is disabled for accesses via the page address.
1245 * KASAN memory tracking as the poison will be properly inserted at page
1250 static inline bool should_skip_kasan_poison(struct page *page) in should_skip_kasan_poison() argument
1255 return page_kasan_tag(page) == KASAN_TAG_KERNEL; in should_skip_kasan_poison()
1258 static void kernel_init_pages(struct page *page, int numpages) in kernel_init_pages() argument
1265 clear_highpage_kasan_tagged(page + i); in kernel_init_pages()
1272 void __clear_page_tag_ref(struct page *page) in __clear_page_tag_ref() argument
1277 if (get_page_tag_ref(page, &ref, &handle)) { in __clear_page_tag_ref()
1286 void __pgalloc_tag_add(struct page *page, struct task_struct *task, in __pgalloc_tag_add() argument
1292 if (get_page_tag_ref(page, &ref, &handle)) { in __pgalloc_tag_add()
1299 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, in pgalloc_tag_add() argument
1303 __pgalloc_tag_add(page, task, nr); in pgalloc_tag_add()
1308 void __pgalloc_tag_sub(struct page *page, unsigned int nr) in __pgalloc_tag_sub() argument
1313 if (get_page_tag_ref(page, &ref, &handle)) { in __pgalloc_tag_sub()
1320 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) in pgalloc_tag_sub() argument
1323 __pgalloc_tag_sub(page, nr); in pgalloc_tag_sub()
1335 static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, in pgalloc_tag_add() argument
1337 static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} in pgalloc_tag_sub() argument
1342 __always_inline bool __free_pages_prepare(struct page *page, in __free_pages_prepare() argument
1346 bool skip_kasan_poison = should_skip_kasan_poison(page); in __free_pages_prepare()
1348 bool compound = PageCompound(page); in __free_pages_prepare()
1349 struct folio *folio = page_folio(page); in __free_pages_prepare()
1351 VM_BUG_ON_PAGE(PageTail(page), page); in __free_pages_prepare()
1353 trace_mm_page_free(page, order); in __free_pages_prepare()
1354 kmsan_free_page(page, order); in __free_pages_prepare()
1356 if (memcg_kmem_online() && PageMemcgKmem(page)) in __free_pages_prepare()
1357 __memcg_kmem_uncharge_page(page, order); in __free_pages_prepare()
1373 if (unlikely(PageHWPoison(page)) && !order) { in __free_pages_prepare()
1375 reset_page_owner(page, order); in __free_pages_prepare()
1376 page_table_check_free(page, order); in __free_pages_prepare()
1377 pgalloc_tag_sub(page, 1 << order); in __free_pages_prepare()
1380 * The page is isolated and accounted for. in __free_pages_prepare()
1382 * when the page is freed by unpoison_memory(). in __free_pages_prepare()
1384 clear_page_tag_ref(page); in __free_pages_prepare()
1388 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in __free_pages_prepare()
1391 * Check tail pages before head page information is cleared to in __free_pages_prepare()
1398 page[1].flags.f &= ~PAGE_FLAGS_SECOND; in __free_pages_prepare()
1405 bad += free_tail_page_prepare(page, page + i); in __free_pages_prepare()
1407 if (free_page_is_bad(page + i)) { in __free_pages_prepare()
1412 (page + i)->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; in __free_pages_prepare()
1419 if (unlikely(page_has_type(page))) in __free_pages_prepare()
1421 page->page_type = UINT_MAX; in __free_pages_prepare()
1424 if (free_page_is_bad(page)) in __free_pages_prepare()
1430 page_cpupid_reset_last(page); in __free_pages_prepare()
1431 page->flags.f &= ~PAGE_FLAGS_CHECK_AT_PREP; in __free_pages_prepare()
1432 page->private = 0; in __free_pages_prepare()
1433 reset_page_owner(page, order); in __free_pages_prepare()
1434 page_table_check_free(page, order); in __free_pages_prepare()
1435 pgalloc_tag_sub(page, 1 << order); in __free_pages_prepare()
1437 if (!PageHighMem(page) && !(fpi_flags & FPI_TRYLOCK)) { in __free_pages_prepare()
1438 debug_check_no_locks_freed(page_address(page), in __free_pages_prepare()
1440 debug_check_no_obj_freed(page_address(page), in __free_pages_prepare()
1444 kernel_poison_pages(page, 1 << order); in __free_pages_prepare()
1452 * page becomes unavailable via debug_pagealloc or arch_free_page. in __free_pages_prepare()
1455 kasan_poison_pages(page, order, init); in __free_pages_prepare()
1462 kernel_init_pages(page, 1 << order); in __free_pages_prepare()
1465 * arch_free_page() can make the page's contents inaccessible. s390 in __free_pages_prepare()
1466 * does this. So nothing which can access the page's contents should in __free_pages_prepare()
1469 arch_free_page(page, order); in __free_pages_prepare()
1471 debug_pagealloc_unmap_pages(page, 1 << order); in __free_pages_prepare()
1476 bool free_pages_prepare(struct page *page, unsigned int order) in free_pages_prepare() argument
1478 return __free_pages_prepare(page, order, FPI_NONE); in free_pages_prepare()
1492 struct page *page; in free_pcppages_bulk() local
1522 page = list_last_entry(list, struct page, pcp_list); in free_pcppages_bulk()
1523 pfn = page_to_pfn(page); in free_pcppages_bulk()
1524 mt = get_pfnblock_migratetype(page, pfn); in free_pcppages_bulk()
1527 list_del(&page->pcp_list); in free_pcppages_bulk()
1531 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1532 trace_mm_page_pcpu_drain(page, order, mt); in free_pcppages_bulk()
1539 /* Split a multi-block free page into its individual pageblocks. */
1540 static void split_large_buddy(struct zone *zone, struct page *page, in split_large_buddy() argument
1546 /* Caller removed page from freelist, buddy info cleared! */ in split_large_buddy()
1547 VM_WARN_ON_ONCE(PageBuddy(page)); in split_large_buddy()
1553 int mt = get_pfnblock_migratetype(page, pfn); in split_large_buddy()
1555 __free_one_page(page, pfn, zone, order, mt, fpi); in split_large_buddy()
1559 page = pfn_to_page(pfn); in split_large_buddy()
1563 static void add_page_to_zone_llist(struct zone *zone, struct page *page, in add_page_to_zone_llist() argument
1567 page->private = order; in add_page_to_zone_llist()
1568 /* Add the page to the free list */ in add_page_to_zone_llist()
1569 llist_add(&page->pcp_llist, &zone->trylock_free_pages); in add_page_to_zone_llist()
1572 static void free_one_page(struct zone *zone, struct page *page, in free_one_page() argument
1581 add_page_to_zone_llist(zone, page, order); in free_one_page()
1592 struct page *p, *tmp; in free_one_page()
1602 split_large_buddy(zone, page, pfn, order, fpi_flags); in free_one_page()
1608 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1611 unsigned long pfn = page_to_pfn(page); in __free_pages_ok()
1612 struct zone *zone = page_zone(page); in __free_pages_ok()
1614 if (__free_pages_prepare(page, order, fpi_flags)) in __free_pages_ok()
1615 free_one_page(zone, page, pfn, order, fpi_flags); in __free_pages_ok()
1618 void __meminit __free_pages_core(struct page *page, unsigned int order, in __free_pages_core() argument
1622 struct page *p = page; in __free_pages_core()
1641 adjust_managed_page_count(page, nr_pages); in __free_pages_core()
1649 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1652 if (page_contains_unaccepted(page, order)) { in __free_pages_core()
1653 if (order == MAX_PAGE_ORDER && __free_unaccepted(page)) in __free_pages_core()
1656 accept_memory(page_to_phys(page), PAGE_SIZE << order); in __free_pages_core()
1663 __free_pages_ok(page, order, FPI_TO_TAIL); in __free_pages_core()
1671 * Return the struct page pointer of start_pfn, or NULL if the checks were not passed.
1678 * the first and last page of a pageblock and avoid checking each individual
1679 * page in a pageblock.
1681 * Note: the function may return non-NULL struct page even for a page block
1690 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, in __pageblock_pfn_to_page()
1693 struct page *start_page; in __pageblock_pfn_to_page()
1694 struct page *end_page; in __pageblock_pfn_to_page()
1732 static inline unsigned int expand(struct zone *zone, struct page *page, int low, in expand() argument
1741 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1744 * Mark as guard pages (or page); this will allow them to in expand()
1746 * Corresponding page table entries will not be touched, in expand()
1749 if (set_page_guard(zone, &page[size], high)) in expand()
1752 __add_to_free_list(&page[size], zone, high, migratetype, false); in expand()
1753 set_buddy_order(&page[size], high); in expand()
1761 struct page *page, int low, in page_del_and_expand() argument
1766 __del_page_from_free_list(page, zone, high, migratetype); in page_del_and_expand()
1767 nr_pages -= expand(zone, page, low, high, migratetype); in page_del_and_expand()
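expand() is the inverse of buddy merging: when __rmqueue_smallest() only finds a block of order `high` but the request is for order `low`, the loop repeatedly halves the block and returns the upper half of each split to the free list. A self-contained model of that loop:

#include <stdio.h>

/* Models expand(): carve an order-`low` chunk off the front of an
 * order-`high` block, freeing the upper halves of each split. */
static void expand_model(unsigned long pfn, unsigned int low, unsigned int high)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;                   /* upper half becomes a free buddy */
		printf("free order-%u block at pfn %#lx\n", high, pfn + size);
	}
	printf("allocated order-%u block at pfn %#lx\n", low, pfn);
}

int main(void)
{
	expand_model(0x1000, 0, 3);           /* split an order-3 block for order-0 */
	return 0;
}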
1771 static void check_new_page_bad(struct page *page) in check_new_page_bad() argument
1773 if (unlikely(PageHWPoison(page))) { in check_new_page_bad()
1775 if (PageBuddy(page)) in check_new_page_bad()
1776 __ClearPageBuddy(page); in check_new_page_bad()
1780 bad_page(page, in check_new_page_bad()
1781 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); in check_new_page_bad()
1785 * This page is about to be returned from the page allocator
1787 static bool check_new_page(struct page *page) in check_new_page() argument
1789 if (likely(page_expected_state(page, in check_new_page()
1793 check_new_page_bad(page); in check_new_page()
1797 static inline bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1801 struct page *p = page + i; in check_new_pages()
1839 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1847 set_page_private(page, 0); in post_alloc_hook()
1849 arch_alloc_page(page, order); in post_alloc_hook()
1850 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
1853 * Page unpoisoning must happen before memory initialization. in post_alloc_hook()
1855 * allocations and the page unpoisoning code will complain. in post_alloc_hook()
1857 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
1870 init = !tag_clear_highpages(page, 1 << order); in post_alloc_hook()
1873 kasan_unpoison_pages(page, order, init)) { in post_alloc_hook()
1879 * If memory tags have not been set by KASAN, reset the page in post_alloc_hook()
1883 page_kasan_tag_reset(page + i); in post_alloc_hook()
1887 kernel_init_pages(page, 1 << order); in post_alloc_hook()
1889 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1890 page_table_check_alloc(page, order); in post_alloc_hook()
1891 pgalloc_tag_add(page, current, 1 << order); in post_alloc_hook()
1894 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1897 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1900 prep_compound_page(page, order); in prep_new_page()
1903 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to in prep_new_page()
1904 * allocate the page. The expectation is that the caller is taking in prep_new_page()
1905 * steps that will free more memory. The caller should avoid the page in prep_new_page()
1909 set_page_pfmemalloc(page); in prep_new_page()
1911 clear_page_pfmemalloc(page); in prep_new_page()
1916 * the smallest available page from the freelists
1919 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest()
1924 struct page *page; in __rmqueue_smallest() local
1926 /* Find a page of the appropriate size in the preferred list */ in __rmqueue_smallest()
1929 page = get_page_from_free_area(area, migratetype); in __rmqueue_smallest()
1930 if (!page) in __rmqueue_smallest()
1933 page_del_and_expand(zone, page, order, current_order, in __rmqueue_smallest()
1935 trace_mm_page_alloc_zone_locked(page, order, migratetype, in __rmqueue_smallest()
1938 return page; in __rmqueue_smallest()
1958 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1964 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1975 struct page *page; in __move_freepages_block() local
1984 page = pfn_to_page(pfn); in __move_freepages_block()
1985 if (!PageBuddy(page)) { in __move_freepages_block()
1991 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in __move_freepages_block()
1992 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in __move_freepages_block()
1994 order = buddy_order(page); in __move_freepages_block()
1996 move_to_free_list(page, zone, order, old_mt, new_mt); in __move_freepages_block()
2005 static bool prep_move_freepages_block(struct zone *zone, struct page *page, in prep_move_freepages_block() argument
2011 pfn = page_to_pfn(page); in prep_move_freepages_block()
2033 page = pfn_to_page(pfn); in prep_move_freepages_block()
2034 if (PageBuddy(page)) { in prep_move_freepages_block()
2035 int nr = 1 << buddy_order(page); in prep_move_freepages_block()
2046 if (PageLRU(page) || page_has_movable_ops(page)) in prep_move_freepages_block()
2055 static int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
2061 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) in move_freepages_block()
2082 struct page *page; in find_large_buddy() local
2085 while (!PageBuddy(page = pfn_to_page(pfn))) { in find_large_buddy()
2095 if (pfn + (1 << buddy_order(page)) > start_pfn) in find_large_buddy()
2102 static inline void toggle_pageblock_isolate(struct page *page, bool isolate) in toggle_pageblock_isolate() argument
2105 set_pageblock_isolate(page); in toggle_pageblock_isolate()
2107 clear_pageblock_isolate(page); in toggle_pageblock_isolate()
2111 * __move_freepages_block_isolate - move free pages in block for page isolation
2113 * @page: the pageblock page
2117 * case encountered in page isolation, where the block of interest
2120 * Unlike the regular page allocator path, which moves pages while
2121 * stealing buddies off the freelist, page isolation is interested in
2130 struct page *page, bool isolate) in __move_freepages_block_isolate() argument
2135 struct page *buddy; in __move_freepages_block_isolate()
2137 if (isolate == get_pageblock_isolate(page)) { in __move_freepages_block_isolate()
2143 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) in __move_freepages_block_isolate()
2158 toggle_pageblock_isolate(page, isolate); in __move_freepages_block_isolate()
2166 from_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), in __move_freepages_block_isolate()
2171 to_mt = __get_pfnblock_flags_mask(page, page_to_pfn(page), in __move_freepages_block_isolate()
2181 bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page) in pageblock_isolate_and_move_free_pages() argument
2183 return __move_freepages_block_isolate(zone, page, true); in pageblock_isolate_and_move_free_pages()
2186 bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page) in pageblock_unisolate_and_move_free_pages() argument
2188 return __move_freepages_block_isolate(zone, page, false); in pageblock_unisolate_and_move_free_pages()
2311 static struct page *
2312 try_to_claim_block(struct zone *zone, struct page *page, in try_to_claim_block() argument
2323 del_page_from_free_list(page, zone, current_order, block_type); in try_to_claim_block()
2324 change_pageblock_range(page, current_order, start_type); in try_to_claim_block()
2325 nr_added = expand(zone, page, order, current_order, start_type); in try_to_claim_block()
2327 return page; in try_to_claim_block()
2339 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, in try_to_claim_block()
2386 static __always_inline struct page *
2393 struct page *page; in __rmqueue_claim() local
2405 * Find the largest available free page in the other list. This roughly in __rmqueue_claim()
2423 page = get_page_from_free_area(area, fallback_mt); in __rmqueue_claim()
2424 page = try_to_claim_block(zone, page, current_order, order, in __rmqueue_claim()
2427 if (page) { in __rmqueue_claim()
2428 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_claim()
2430 return page; in __rmqueue_claim()
2438 * Try to steal a single page from some fallback migratetype. Leave the rest of
2441 static __always_inline struct page *
2446 struct page *page; in __rmqueue_steal() local
2456 page = get_page_from_free_area(area, fallback_mt); in __rmqueue_steal()
2457 page_del_and_expand(zone, page, order, current_order, fallback_mt); in __rmqueue_steal()
2458 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_steal()
2460 return page; in __rmqueue_steal()
2477 static __always_inline struct page *
2481 struct page *page; in __rmqueue() local
2492 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2493 if (page) in __rmqueue()
2494 return page; in __rmqueue()
2509 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2510 if (page) in __rmqueue()
2511 return page; in __rmqueue()
2515 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2516 if (page) { in __rmqueue()
2518 return page; in __rmqueue()
2523 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); in __rmqueue()
2524 if (page) { in __rmqueue()
2527 return page; in __rmqueue()
2532 page = __rmqueue_steal(zone, order, migratetype); in __rmqueue()
2533 if (page) { in __rmqueue()
2535 return page; in __rmqueue()
2562 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk() local
2564 if (unlikely(page == NULL)) in rmqueue_bulk()
2569 * physical page order. The page is added to the tail of in rmqueue_bulk()
2571 * is ordered by page number under some conditions. This is in rmqueue_bulk()
2573 * head, thus also in the physical page order. This is useful in rmqueue_bulk()
2577 list_add_tail(&page->pcp_list, list); in rmqueue_bulk()
2856 * May return a freed pcp if, during page freeing, the pcp spinlock cannot be
2860 struct per_cpu_pages *pcp, struct page *page, int migratetype, in free_frozen_page_commit() argument
2878 list_add(&page->pcp_list, &pcp->lists[pindex]); in free_frozen_page_commit()
2962 * Free a pcp page
2964 static void __free_frozen_pages(struct page *page, unsigned int order, in __free_frozen_pages() argument
2970 unsigned long pfn = page_to_pfn(page); in __free_frozen_pages()
2974 __free_pages_ok(page, order, fpi_flags); in __free_frozen_pages()
2978 if (!__free_pages_prepare(page, order, fpi_flags)) in __free_frozen_pages()
2986 * excessively into the page allocator in __free_frozen_pages()
2988 zone = page_zone(page); in __free_frozen_pages()
2989 migratetype = get_pfnblock_migratetype(page, pfn); in __free_frozen_pages()
2992 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
3000 add_page_to_zone_llist(zone, page, order); in __free_frozen_pages()
3005 if (!free_frozen_page_commit(zone, pcp, page, migratetype, in __free_frozen_pages()
3010 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
3014 void free_frozen_pages(struct page *page, unsigned int order) in free_frozen_pages() argument
3016 __free_frozen_pages(page, order, FPI_NONE); in free_frozen_pages()
3019 void free_frozen_pages_nolock(struct page *page, unsigned int order) in free_frozen_pages_nolock() argument
3021 __free_frozen_pages(page, order, FPI_TRYLOCK); in free_frozen_pages_nolock()
3040 if (!__free_pages_prepare(&folio->page, order, FPI_NONE)) in free_unref_folios()
3047 free_one_page(folio_zone(folio), &folio->page, in free_unref_folios()
3066 migratetype = get_pfnblock_migratetype(&folio->page, pfn); in free_unref_folios()
3082 free_one_page(zone, &folio->page, pfn, in free_unref_folios()
3093 free_one_page(zone, &folio->page, pfn, in free_unref_folios()
3107 trace_mm_page_free_batched(&folio->page); in free_unref_folios()
3108 if (!free_frozen_page_commit(zone, pcp, &folio->page, in free_unref_folios()
3120 static void __split_page(struct page *page, unsigned int order) in __split_page() argument
3122 VM_WARN_ON_PAGE(PageCompound(page), page); in __split_page()
3124 split_page_owner(page, order, 0); in __split_page()
3125 pgalloc_tag_split(page_folio(page), order, 0); in __split_page()
3126 split_page_memcg(page, order); in __split_page()
3130 * split_page takes a non-compound higher-order page, and splits it into
3131 * n (1<<order) sub-pages: page[0 .. n-1]
3132 * Each sub-page must be freed individually.
3137 void split_page(struct page *page, unsigned int order) in split_page() argument
3141 VM_WARN_ON_PAGE(!page_count(page), page); in split_page()
3144 set_page_refcounted(page + i); in split_page()
3146 __split_page(page, order); in split_page()
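A typical split_page() call site looks like the following kernel-context sketch (not standalone-runnable; error handling trimmed). The allocation must not use __GFP_COMP, since split_page() only works on non-compound pages:

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *alloc_and_split(void)
{
	/* Order-2, non-compound: four physically contiguous pages. */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;
	split_page(page, 2);           /* page[0..3] are now independent */
	__free_page(page + 3);         /* each sub-page can be freed alone */
	return page;                   /* caller keeps page[0..2] */
}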
3150 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
3152 struct zone *zone = page_zone(page); in __isolate_free_page()
3153 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
3158 * Obey watermarks as if the page was being allocated. We can in __isolate_free_page()
3160 * watermark, because we already know our high-order page in __isolate_free_page()
3168 del_page_from_free_list(page, zone, order, mt); in __isolate_free_page()
3171 * Set the pageblock if the isolated page is at least half of a in __isolate_free_page()
3175 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
3176 for (; page < endpage; page += pageblock_nr_pages) { in __isolate_free_page()
3177 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
3183 move_freepages_block(zone, page, mt, in __isolate_free_page()
3192 * __putback_isolated_page - Return a now-isolated page back where we got it
3193 * @page: Page that was isolated
3194 * @order: Order of the isolated page
3195 * @mt: The page's pageblock's migratetype
3197 * This function is meant to return a page pulled from the free lists via
3200 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
3202 struct zone *zone = page_zone(page); in __putback_isolated_page()
3207 /* Return isolated page to tail of freelist. */ in __putback_isolated_page()
3208 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
3239 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, in rmqueue_buddy()
3243 struct page *page; in rmqueue_buddy() local
3247 page = NULL; in rmqueue_buddy()
3255 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3256 if (!page) { in rmqueue_buddy()
3259 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); in rmqueue_buddy()
3267 if (!page && (alloc_flags & (ALLOC_OOM|ALLOC_NON_BLOCK))) in rmqueue_buddy()
3268 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
3270 if (!page) { in rmqueue_buddy()
3276 } while (check_new_pages(page, order)); in rmqueue_buddy()
3278 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_buddy()
3281 return page; in rmqueue_buddy()
3334 /* Remove page from the per-cpu list; caller must protect the list */
3336 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist()
3342 struct page *page; in __rmqueue_pcplist() local
3358 page = list_first_entry(list, struct page, pcp_list); in __rmqueue_pcplist()
3359 list_del(&page->pcp_list); in __rmqueue_pcplist()
3361 } while (check_new_pages(page, order)); in __rmqueue_pcplist()
3363 return page; in __rmqueue_pcplist()
3366 /* Lock and remove page from the per-cpu list */
3367 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3373 struct page *page; in rmqueue_pcplist() local
3388 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3390 if (page) { in rmqueue_pcplist()
3391 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
3394 return page; in rmqueue_pcplist()
3398 * Allocate a page from the given zone.
3410 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3415 struct page *page; in rmqueue() local
3418 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3420 if (likely(page)) in rmqueue()
3424 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
3435 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3436 return page; in rmqueue()
3442 * empty page blocks that contain a page with a suitable order
3444 static void reserve_highatomic_pageblock(struct page *page, int order, in reserve_highatomic_pageblock() argument
3469 mt = get_pageblock_migratetype(page); in reserve_highatomic_pageblock()
3475 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) in reserve_highatomic_pageblock()
3479 change_pageblock_range(page, order, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
3503 struct page *page; in unreserve_highatomic_pageblock() local
3522 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); in unreserve_highatomic_pageblock()
3523 if (!page) in unreserve_highatomic_pageblock()
3548 ret = move_freepages_block(zone, page, in unreserve_highatomic_pageblock()
3552 move_to_free_list(page, zone, order, in unreserve_highatomic_pageblock()
3555 change_pageblock_range(page, order, in unreserve_highatomic_pageblock()
3599 * one free page of a suitable size. Checking now avoids taking the zone lock
3644 * even if a suitable page happened to be free. in __zone_watermark_ok()
3653 /* For a high-order request, check at least one suitable page is free */ in __zone_watermark_ok()
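The watermark check referenced above has two parts: a bulk free-memory test against the watermark plus the lowmem reserve, and, for high-order requests, a scan for at least one free block of sufficient order. A simplified userspace model follows; the real __zone_watermark_ok() also deducts (1 << order) - 1 from free_pages, honors reserve-related alloc flags, and walks per-migratetype free lists:

#include <stdbool.h>
#include <stdio.h>

#define NR_ORDERS 11                       /* assumption: MAX_PAGE_ORDER + 1 */

static bool zone_watermark_ok_model(unsigned long free_pages, unsigned long mark,
				    unsigned long lowmem_reserve, unsigned int order,
				    const unsigned long free_count[NR_ORDERS])
{
	/* Part one: enough free memory above the watermark at all? */
	if (free_pages <= mark + lowmem_reserve)
		return false;
	if (!order)
		return true;

	/* Part two: is at least one suitably large free block left? */
	for (unsigned int o = order; o < NR_ORDERS; o++)
		if (free_count[o])
			return true;
	return false;
}

int main(void)
{
	/* Plenty of order-0 pages, but nothing bigger than order-3. */
	unsigned long free_count[NR_ORDERS] = { [0] = 512, [3] = 1 };

	printf("order-0 ok: %d\n", zone_watermark_ok_model(520, 128, 64, 0, free_count));
	printf("order-4 ok: %d\n", zone_watermark_ok_model(520, 128, 64, 4, free_count));
	return 0;
}

The second call fails despite ample free memory overall: the zone is too fragmented to satisfy an order-4 request, which is exactly the case the high-order branch guards against.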
3805 * a page.
3807 static struct page *
3828 struct page *page; in get_page_from_freelist() local
3836 * When allocating a page cache page for writing, we in get_page_from_freelist()
3899 * premature page reclaiming. Detection is done here to in get_page_from_freelist()
3959 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, in get_page_from_freelist()
3961 if (page) { in get_page_from_freelist()
3962 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3969 reserve_highatomic_pageblock(page, order, zone); in get_page_from_freelist()
3971 return page; in get_page_from_freelist()
4050 static inline struct page *
4055 struct page *page; in __alloc_pages_cpuset_fallback() local
4057 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4063 if (!page) in __alloc_pages_cpuset_fallback()
4064 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4066 return page; in __alloc_pages_cpuset_fallback()
4069 static inline struct page *
4080 struct page *page; in __alloc_pages_may_oom() local
4101 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
4104 if (page) in __alloc_pages_may_oom()
4148 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4153 return page; in __alloc_pages_may_oom()
4164 static struct page *
4169 struct page *page = NULL; in __alloc_pages_direct_compact() local
4181 prio, &page); in __alloc_pages_direct_compact()
4195 /* Prep a captured page if available */ in __alloc_pages_direct_compact()
4196 if (page) in __alloc_pages_direct_compact()
4197 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4199 /* Try get a page from the freelist if available */ in __alloc_pages_direct_compact()
4200 if (!page) in __alloc_pages_direct_compact()
4201 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4203 if (page) { in __alloc_pages_direct_compact()
4204 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact()
4209 return page; in __alloc_pages_direct_compact()
4251 * Compaction managed to coalesce some page blocks, but the in should_compact_retry()
4289 static inline struct page *
4409 /* Perform direct synchronous page reclaim */
4436 static inline struct page *
4441 struct page *page = NULL; in __alloc_pages_direct_reclaim() local
4451 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4458 if (!page && !drained) { in __alloc_pages_direct_reclaim()
4467 return page; in __alloc_pages_direct_reclaim()
4509 * The caller may dip into page reserves a bit more if the caller in gfp_to_alloc_flags()
4709 static inline struct page *
4717 struct page *page = NULL; in __alloc_pages_slowpath() local
4805 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4806 if (page) in __alloc_pages_slowpath()
4846 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, in __alloc_pages_slowpath()
4848 if (page) in __alloc_pages_slowpath()
4853 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4855 if (page) in __alloc_pages_slowpath()
4860 * THP page faults may attempt local node only first, but are in __alloc_pages_slowpath()
4937 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4938 if (page) in __alloc_pages_slowpath()
4982 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); in __alloc_pages_slowpath()
4983 if (page) in __alloc_pages_slowpath()
4991 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
4993 return page; in __alloc_pages_slowpath()
5052 * This is a batched version of the page allocator that attempts to allocate
5067 struct page **page_array) in alloc_pages_bulk_noprof()
5069 struct page *page; in alloc_pages_bulk_noprof() local
5099 /* Use the single page allocator for one page. */ in alloc_pages_bulk_noprof()
5108 * force the caller to allocate one page at a time as it'll have in alloc_pages_bulk_noprof()
5115 /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ in alloc_pages_bulk_noprof()
5158 * try to allocate a single page and reclaim if necessary. in alloc_pages_bulk_noprof()
5178 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, in alloc_pages_bulk_noprof()
5180 if (unlikely(!page)) { in alloc_pages_bulk_noprof()
5181 /* Try and allocate at least one page */ in alloc_pages_bulk_noprof()
5190 prep_new_page(page, 0, gfp, 0); in alloc_pages_bulk_noprof()
5191 set_page_refcounted(page); in alloc_pages_bulk_noprof()
5192 page_array[nr_populated++] = page; in alloc_pages_bulk_noprof()
5204 page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); in alloc_pages_bulk_noprof()
5205 if (page) in alloc_pages_bulk_noprof()
5206 page_array[nr_populated++] = page; in alloc_pages_bulk_noprof()
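For callers, the batch path above is typically reached through a wrapper; its exact name and signature have shifted across kernel releases, so the sketch below assumes the recent alloc_pages_bulk(gfp, nr_pages, page_array) form, which returns how many array slots it populated:

#include <linux/errno.h>
#include <linux/gfp.h>

static int grab_batch(struct page **pages, unsigned long want)
{
	unsigned long got = alloc_pages_bulk(GFP_KERNEL, want, pages);

	/* The batch may come back short; callers retry or fall back. */
	return got == want ? 0 : -ENOMEM;
}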
5214 struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, in __alloc_frozen_pages_noprof()
5217 struct page *page; in __alloc_frozen_pages_noprof() local
5250 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in __alloc_frozen_pages_noprof()
5251 if (likely(page)) in __alloc_frozen_pages_noprof()
5263 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); in __alloc_frozen_pages_noprof()
5266 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && in __alloc_frozen_pages_noprof()
5267 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { in __alloc_frozen_pages_noprof()
5268 free_frozen_pages(page, order); in __alloc_frozen_pages_noprof()
5269 page = NULL; in __alloc_frozen_pages_noprof()
5272 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in __alloc_frozen_pages_noprof()
5273 kmsan_alloc_page(page, order, alloc_gfp); in __alloc_frozen_pages_noprof()
5275 return page; in __alloc_frozen_pages_noprof()
5279 struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, in __alloc_pages_noprof()
5282 struct page *page; in __alloc_pages_noprof() local
5284 page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); in __alloc_pages_noprof()
5285 if (page) in __alloc_pages_noprof()
5286 set_page_refcounted(page); in __alloc_pages_noprof()
5287 return page; in __alloc_pages_noprof()
5294 struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, in __folio_alloc_noprof() local
5296 return page_rmappable_folio(page); in __folio_alloc_noprof()
5307 struct page *page; in get_free_pages_noprof() local
5309 page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); in get_free_pages_noprof()
5310 if (!page) in get_free_pages_noprof()
5312 return (unsigned long) page_address(page); in get_free_pages_noprof()
5322 static void ___free_pages(struct page *page, unsigned int order, in ___free_pages() argument
5326 int head = PageHead(page); in ___free_pages()
5327 /* get alloc tag in case the page is released by others */ in ___free_pages()
5328 struct alloc_tag *tag = pgalloc_tag_get(page); in ___free_pages()
5330 if (put_page_testzero(page)) in ___free_pages()
5331 __free_frozen_pages(page, order, fpi_flags); in ___free_pages()
5337 * page will have no code tags, so to avoid warnings in ___free_pages()
5340 clear_page_tag_ref(page + (1 << order)); in ___free_pages()
5341 __free_frozen_pages(page + (1 << order), order, in ___free_pages()
5349 * @page: The page pointer returned from alloc_pages().
5352 * This function can free multi-page allocations that are not compound
5357 * If the last reference to this page is speculative, it will be released
5358 * by put_page() which only frees the first page of a non-compound
5360 * the subsequent pages here. If you want to use the page's reference
5362 * compound page, and use put_page() instead of __free_pages().
5367 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
5369 ___free_pages(page, order, FPI_NONE); in __free_pages()
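The rule in the comment block above is worth a concrete contrast (kernel-context sketch): refcount-based freeing via put_page() is only safe for compound allocations, while non-compound high-order pages must be returned through __free_pages(), which also frees the subsequent pages:

#include <linux/gfp.h>
#include <linux/mm.h>

static void free_style_contrast(void)
{
	struct page *plain = alloc_pages(GFP_KERNEL, 2);
	struct page *comp  = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);

	if (plain)
		__free_pages(plain, 2);  /* frees all four pages */
	if (comp)
		put_page(comp);          /* compound: one refcount for the unit */
}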
5375 * page type (not only those that came from alloc_pages_nolock)
5377 void free_pages_nolock(struct page *page, unsigned int order) in free_pages_nolock() argument
5379 ___free_pages(page, order, FPI_TRYLOCK); in free_pages_nolock()
5384 * @addr: The virtual address tied to a page returned from __get_free_pages().
5389 * the page, call __free_pages() instead.
5406 struct page *page = virt_to_page((void *)addr); in make_alloc_exact() local
5407 struct page *last = page + nr; in make_alloc_exact()
5409 __split_page(page, order); in make_alloc_exact()
5410 while (page < --last) in make_alloc_exact()
5413 last = page + (1UL << order); in make_alloc_exact()
5414 for (page += nr; page < last; page++) in make_alloc_exact()
5415 __free_pages_ok(page, 0, FPI_TO_TAIL); in make_alloc_exact()
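make_alloc_exact() is the engine behind alloc_pages_exact(): a power-of-two block is split with __split_page() and the pages beyond the requested size are handed straight back, as the loop above shows. Usage is plain (kernel-context sketch):

#include <linux/errno.h>
#include <linux/gfp.h>

static int exact_demo(void)
{
	/* 5 pages requested: an order-3 block is split, 3 pages freed back. */
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use the 5-page buffer ... */
	free_pages_exact(buf, 5 * PAGE_SIZE);
	return 0;
}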
5463 struct page *p; in alloc_pages_exact_nid_noprof()
5872 * E.g. the percpu allocator needs the page allocator which in build_all_zonelists_init()
5944 * of pages of one half of the possible page colors in zone_batchsize()
5956 * of contiguous memory as there's no hardware page translation to in zone_batchsize()
6137 * page high values need to be recalculated.
6229 void adjust_managed_page_count(struct page *page, long count) in adjust_managed_page_count() argument
6231 atomic_long_add(count, &page_zone(page)->managed_pages); in adjust_managed_page_count()
6245 struct page *page = virt_to_page(pos); in free_reserved_area() local
6255 direct_map_addr = page_address(page); in free_reserved_area()
6264 free_reserved_page(page); in free_reserved_area()
6273 void free_reserved_page(struct page *page) in free_reserved_page() argument
6275 clear_page_tag_ref(page); in free_reserved_page()
6276 ClearPageReserved(page); in free_reserved_page()
6277 init_page_count(page); in free_reserved_page()
6278 __free_page(page); in free_reserved_page()
6279 adjust_managed_page_count(page, 1); in free_reserved_page()
6459 * deltas control async page reclaim, and so should in __setup_per_zone_wmarks()
6834 struct page *page; in alloc_contig_dump_pages() local
6837 list_for_each_entry(page, page_list, lru) in alloc_contig_dump_pages()
6838 dump_page(page, "migration failure"); in alloc_contig_dump_pages()
6907 struct page *page, *next; in split_free_frozen_pages() local
6910 list_for_each_entry_safe(page, next, &list[order], lru) { in split_free_frozen_pages()
6913 post_alloc_hook(page, order, gfp_mask); in split_free_frozen_pages()
6917 __split_page(page, order); in split_free_frozen_pages()
6920 list_del(&page->lru); in split_free_frozen_pages()
6922 list_add_tail(&page[i].lru, &list[0]); in split_free_frozen_pages()
6950 * Flags to control page compaction/migration/reclaim, to free up our in __alloc_contig_verify_gfp_mask()
6951 * page range. Migratable pages are movable, __GFP_MOVABLE is implied in __alloc_contig_verify_gfp_mask()
7027 * have different sizes, and due to the way page allocator in alloc_contig_frozen_range_noprof()
7033 * range back to the page allocator as MIGRATE_ISOLATE. in alloc_contig_frozen_range_noprof()
7035 * When this is done, we take the pages in range from page in alloc_contig_frozen_range_noprof()
7037 * page allocator will never consider using them. in alloc_contig_frozen_range_noprof()
7042 * put back to the page allocator so that buddy can use them. in alloc_contig_frozen_range_noprof()
7052 * In case of -EBUSY, we'd like to know which page causes the problem. in alloc_contig_frozen_range_noprof()
7054 * which will report the busy page. in alloc_contig_frozen_range_noprof()
7079 * more, all pages in [start, end) are free in the page allocator. in alloc_contig_frozen_range_noprof()
7081 * [start, end) (that is, remove them from the page allocator). in alloc_contig_frozen_range_noprof()
7085 * page allocator holds, i.e. they can be part of a higher order in alloc_contig_frozen_range_noprof()
7116 struct page *head = pfn_to_page(start); in alloc_contig_frozen_range_noprof()
7139 * be used to allocate compound pages, the refcount of each allocated page
7144 * __free_page() on each allocated page.
7169 struct page *page; in pfn_range_valid_contig() local
7174 page = pfn_to_online_page(start_pfn); in pfn_range_valid_contig()
7175 if (!page) in pfn_range_valid_contig()
7178 if (page_zone(page) != z) in pfn_range_valid_contig()
7181 if (page_is_unmovable(z, page, PB_ISOLATE_MODE_OTHER, &step)) in pfn_range_valid_contig()
7193 if (PageHuge(page)) { in pfn_range_valid_contig()
7201 page = compound_head(page); in pfn_range_valid_contig()
7202 order = compound_order(page); in pfn_range_valid_contig()
7236 * The allocated memory is always aligned to a page boundary. If nr_pages is a
7242 * non-compound page, for compound frozen pages could be freed with
7247 struct page *alloc_contig_frozen_pages_noprof(unsigned long nr_pages, in alloc_contig_frozen_pages_noprof()
7311 * be used to allocate compound pages, the refcount of each allocated page
7315 * calling __free_page() on each allocated page.
7319 struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages_noprof()
7322 struct page *page; in alloc_contig_pages_noprof() local
7327 page = alloc_contig_frozen_pages_noprof(nr_pages, gfp_mask, nid, in alloc_contig_pages_noprof()
7329 if (page) in alloc_contig_pages_noprof()
7330 set_pages_refcounted(page, nr_pages); in alloc_contig_pages_noprof()
7332 return page; in alloc_contig_pages_noprof()
7345 struct page *first_page = pfn_to_page(pfn); in free_contig_frozen_range()
7381 * and draining all cpus. A concurrent page freeing on another CPU that's about
7382 * to put the page on pcplist will either finish before the drain and the page
7426 * number of pages for which memory offlining code must adjust managed page
7434 struct page *page; in __offline_isolated_pages() local
7442 page = pfn_to_page(pfn); in __offline_isolated_pages()
7444 * The HWPoisoned page may not be in the buddy system, and in __offline_isolated_pages()
7447 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { in __offline_isolated_pages()
7455 if (PageOffline(page)) { in __offline_isolated_pages()
7456 BUG_ON(page_count(page)); in __offline_isolated_pages()
7457 BUG_ON(PageBuddy(page)); in __offline_isolated_pages()
7463 BUG_ON(page_count(page)); in __offline_isolated_pages()
7464 BUG_ON(!PageBuddy(page)); in __offline_isolated_pages()
7465 VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); in __offline_isolated_pages()
7466 order = buddy_order(page); in __offline_isolated_pages()
7467 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); in __offline_isolated_pages()
7479 bool is_free_buddy_page(const struct page *page) in is_free_buddy_page() argument
7481 unsigned long pfn = page_to_pfn(page); in is_free_buddy_page()
7485 const struct page *head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
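The head computation above masks off the low `order` bits of the pfn to find where an order-`order` buddy containing this page would have to start; is_free_buddy_page() then checks each candidate for PageBuddy at at least that order. A standalone demonstration of the masking:

#include <stdio.h>

int main(void)
{
	unsigned long pfn = 0x1237;

	for (unsigned int order = 0; order <= 4; order++)
		printf("order %u: candidate head pfn %#lx\n",
		       order, pfn - (pfn & ((1UL << order) - 1)));
	return 0;
}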
7497 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
7501 __add_to_free_list(page, zone, order, migratetype, tail); in add_to_free_list()
7506 * Break down a higher-order page into sub-pages, and keep our target out of
7509 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
7510 struct page *target, int low, int high, in break_down_buddy_pages()
7514 struct page *current_buddy; in break_down_buddy_pages()
7520 if (target >= &page[size]) { in break_down_buddy_pages()
7521 current_buddy = page; in break_down_buddy_pages()
7522 page = page + size; in break_down_buddy_pages()
7524 current_buddy = page + size; in break_down_buddy_pages()
7536 * Take a page that will be marked as poisoned off the buddy allocator.
7538 bool take_page_off_buddy(struct page *page) in take_page_off_buddy() argument
7540 struct zone *zone = page_zone(page); in take_page_off_buddy()
7541 unsigned long pfn = page_to_pfn(page); in take_page_off_buddy()
7548 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
7558 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
7560 SetPageHWPoisonTakenOff(page); in take_page_off_buddy()
7574 bool put_page_back_buddy(struct page *page) in put_page_back_buddy() argument
7576 struct zone *zone = page_zone(page); in put_page_back_buddy()
7581 if (put_page_testzero(page)) { in put_page_back_buddy()
7582 unsigned long pfn = page_to_pfn(page); in put_page_back_buddy()
7583 int migratetype = get_pfnblock_migratetype(page, pfn); in put_page_back_buddy()
7585 ClearPageHWPoisonTakenOff(page); in put_page_back_buddy()
7586 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); in put_page_back_buddy()
7587 if (TestClearPageHWPoison(page)) { in put_page_back_buddy()
7626 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
7628 phys_addr_t start = page_to_phys(page); in page_contains_unaccepted()
7634 struct page *page) in __accept_page() argument
7636 list_del(&page->lru); in __accept_page()
7639 __ClearPageUnaccepted(page); in __accept_page()
7642 accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER); in __accept_page()
7644 __free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL); in __accept_page()
7647 void accept_page(struct page *page) in accept_page() argument
7649 struct zone *zone = page_zone(page); in accept_page()
7653 if (!PageUnaccepted(page)) { in accept_page()
7659 __accept_page(zone, &flags, page); in accept_page()
7665 struct page *page; in try_to_accept_memory_one() local
7668 page = list_first_entry_or_null(&zone->unaccepted_pages, in try_to_accept_memory_one()
7669 struct page, lru); in try_to_accept_memory_one()
7670 if (!page) { in try_to_accept_memory_one()
7676 __accept_page(zone, &flags, page); in try_to_accept_memory_one()
7699 * Accepting one MAX_ORDER page to ensure progress. in cond_accept_memory()
7720 static bool __free_unaccepted(struct page *page) in __free_unaccepted() argument
7722 struct zone *zone = page_zone(page); in __free_unaccepted()
7729 list_add_tail(&page->lru, &zone->unaccepted_pages); in __free_unaccepted()
7732 __SetPageUnaccepted(page); in __free_unaccepted()
7740 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
7751 static bool __free_unaccepted(struct page *page) in __free_unaccepted() argument
7759 struct page *alloc_frozen_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) in alloc_frozen_pages_nolock_noprof()
7774 * is safe in any context. Also zeroing the page is mandatory for in alloc_frozen_pages_nolock_noprof()
7785 struct page *page; in alloc_frozen_pages_nolock_noprof() local
7818 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in alloc_frozen_pages_nolock_noprof()
7822 if (memcg_kmem_online() && page && (gfp_flags & __GFP_ACCOUNT) && in alloc_frozen_pages_nolock_noprof()
7823 unlikely(__memcg_kmem_charge_page(page, alloc_gfp, order) != 0)) { in alloc_frozen_pages_nolock_noprof()
7824 __free_frozen_pages(page, order, FPI_TRYLOCK); in alloc_frozen_pages_nolock_noprof()
7825 page = NULL; in alloc_frozen_pages_nolock_noprof()
7827 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in alloc_frozen_pages_nolock_noprof()
7828 kmsan_alloc_page(page, order, alloc_gfp); in alloc_frozen_pages_nolock_noprof()
7829 return page; in alloc_frozen_pages_nolock_noprof()
7844 * Return: allocated page or NULL on failure. NULL does not mean EBUSY or EAGAIN.
7847 struct page *alloc_pages_nolock_noprof(gfp_t gfp_flags, int nid, unsigned int order) in alloc_pages_nolock_noprof()
7849 struct page *page; in alloc_pages_nolock_noprof() local
7851 page = alloc_frozen_pages_nolock_noprof(gfp_flags, nid, order); in alloc_pages_nolock_noprof()
7852 if (page) in alloc_pages_nolock_noprof()
7853 set_page_refcounted(page); in alloc_pages_nolock_noprof()
7854 return page; in alloc_pages_nolock_noprof()