Lines Matching defs:page
7 #include <linux/page-isolation.h>
28 * Returns a page without holding a reference. If the caller wants to
29 * dereference that page (e.g., dumping), it has to make sure that it
33 static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,
36 struct page *page = pfn_to_page(start_pfn);
37 struct zone *zone = page_zone(page);
43 if (is_migrate_cma_page(page)) {
52 return page;
56 page = pfn_to_page(pfn);
64 if (PageReserved(page))
65 return page;
79 * handle each tail page individually in migration.
81 if (PageHuge(page) || PageTransCompound(page)) {
82 struct folio *folio = page_folio(page);
85 if (PageHuge(page)) {
89 * The huge page may be freed, so we cannot
94 return page;
96 return page;
99 skip_pages = folio_nr_pages(folio) - folio_page_idx(folio, page);
105 * We can't use page_count without pinning the page
106 * because another CPU can free the compound page.
108 * because their page->_refcount is zero at all times.
110 if (!page_ref_count(page)) {
111 if (PageBuddy(page))
112 pfn += (1 << buddy_order(page)) - 1;
117 * The HWPoisoned page may not be in the buddy system, and
120 if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageHWPoison(page))
133 if ((mode == PB_ISOLATE_MODE_MEM_OFFLINE) && PageOffline(page))
136 if (PageLRU(page) || page_has_movable_ops(page))
144 return page;
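
The `pfn += (1 << buddy_order(page)) - 1` step above lets the scan jump past an entire free buddy block instead of testing each of its pages. A minimal userspace sketch of that skip pattern (the is_free_buddy() and order_of() helpers are hypothetical stand-ins for the kernel's PageBuddy()/buddy_order(), not its code):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical layout: every 8th pfn starts an order-3 free block. */
static bool is_free_buddy(unsigned long pfn) { return pfn % 8 == 0; }
static unsigned int order_of(unsigned long pfn) { return 3; }

int main(void)
{
	unsigned long start_pfn = 0, end_pfn = 32, visited = 0;

	for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn++) {
		visited++;
		if (is_free_buddy(pfn))
			/* Skip the rest of the 2^order block; the loop's
			 * pfn++ then lands just past it. */
			pfn += (1UL << order_of(pfn)) - 1;
	}
	printf("visited %lu of %lu pfns\n", visited, end_pfn - start_pfn);
	return 0;
}
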
150 * This function sets the pageblock migratetype to isolate if no unmovable page is
154 static int set_migratetype_isolate(struct page *page, enum pb_isolate_mode mode,
157 struct zone *zone = page_zone(page);
158 struct page *unmovable;
162 if (PageUnaccepted(page))
163 accept_page(page);
172 if (is_migrate_isolate_page(page)) {
181 * Pass the intersection of [start_pfn, end_pfn) and the page's pageblock
184 check_unmovable_start = max(page_to_pfn(page), start_pfn);
185 check_unmovable_end = min(pageblock_end_pfn(page_to_pfn(page)),
191 if (!pageblock_isolate_and_move_free_pages(zone, page)) {
206 dump_page(unmovable, "unmovable page");
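
The max()/min() pair above clips the unmovable-page scan to the part of the caller's [start_pfn, end_pfn) that actually lies inside this pageblock. A standalone sketch of that intersection, assuming a 512-page pageblock (pageblock_order 9); pageblock_end() is a hypothetical model of pageblock_end_pfn():

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed pageblock size */

/* Model of pageblock_end_pfn(): first pfn past this pfn's pageblock. */
static unsigned long pageblock_end(unsigned long pfn)
{
	return (pfn / PAGEBLOCK_NR_PAGES + 1) * PAGEBLOCK_NR_PAGES;
}

int main(void)
{
	unsigned long start_pfn = 700, end_pfn = 900;	/* caller's range */
	unsigned long block_pfn = 512;			/* pageblock's first page */

	unsigned long check_start = block_pfn > start_pfn ? block_pfn : start_pfn;
	unsigned long check_end = pageblock_end(block_pfn) < end_pfn ?
				  pageblock_end(block_pfn) : end_pfn;

	/* [700, 900) clipped to pageblock [512, 1024) stays [700, 900). */
	printf("check [%lu, %lu)\n", check_start, check_end);
	return 0;
}
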
212 static void unset_migratetype_isolate(struct page *page)
218 struct page *buddy;
220 zone = page_zone(page);
222 if (!is_migrate_isolate_page(page))
228 * it is possible that there is a free buddy page.
233 if (PageBuddy(page)) {
234 order = buddy_order(page);
236 buddy = find_buddy_page_pfn(page, page_to_pfn(page),
239 isolated_page = !!__isolate_free_page(page, order);
241 * Isolating a free page in an isolated pageblock
265 WARN_ON_ONCE(!pageblock_unisolate_and_move_free_pages(zone, page));
267 clear_pageblock_isolate(page);
268 __putback_isolated_page(page, order, get_pageblock_migratetype(page));
275 static inline struct page *
281 struct page *page;
283 page = pfn_to_online_page(pfn + i);
284 if (!page)
286 return page;
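
__first_valid_page() simply probes up to nr_pages consecutive pfns with pfn_to_online_page() and returns the first that is backed by an online page, so callers can treat a pageblock that begins in a memory hole like any other. A userspace model (pfn_to_online() is a hypothetical stand-in that fakes a hole before pfn 520):

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for pfn_to_online_page(): NULL in the hole. */
static void *pfn_to_online(unsigned long pfn)
{
	static char page;	/* dummy "struct page" */
	return (pfn >= 520 && pfn < 1024) ? &page : NULL;
}

/* Model of __first_valid_page(): first online page in [pfn, pfn + nr). */
static void *first_valid_page(unsigned long pfn, unsigned long nr)
{
	for (unsigned long i = 0; i < nr; i++) {
		void *page = pfn_to_online(pfn + i);
		if (page)
			return page;
	}
	return NULL;
}

int main(void)
{
	/* The pageblock at pfn 512 starts in a hole; the helper still
	 * finds the first online page at pfn 520. */
	printf("found: %s\n", first_valid_page(512, 512) ? "yes" : "no");
	return 0;
}
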
293 * within a free or in-use page.
294 * @boundary_pfn: pageblock-aligned pfn that a page might cross
301 * pageblock. When not all pageblocks within a page are isolated at the same
302 * time, free page accounting can go wrong. For example, in the case of
303 * MAX_PAGE_ORDER = pageblock_order + 1, a MAX_PAGE_ORDER page has two
307 * When either pageblock is isolated, if it is a free page, the page is not
309 * in-use page and freed later, __free_one_page() does not split the free page
310 * either. The function handles this by splitting the free page or migrating
311 * the in-use page then splitting the free page.
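
The two-pageblock case described above is easy to see numerically: with pageblock_order = 9 and MAX_PAGE_ORDER = 10 (assumed values; both are config-dependent), one max-order buddy chunk spans exactly two pageblocks, which is why isolating only one of them requires splitting the free page first. A trivial check:

#include <stdio.h>

int main(void)
{
	/* Assumed, config-dependent values. */
	unsigned int pageblock_order = 9, max_page_order = 10;
	unsigned long block_pages = 1UL << pageblock_order;	/* 512 */
	unsigned long max_pages = 1UL << max_page_order;	/* 1024 */

	printf("pageblocks per MAX_PAGE_ORDER page: %lu\n",
	       max_pages / block_pages);
	return 0;
}
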
333 * free or in-use page. Also make sure all to-be-isolated pageblocks
353 * a free or in-use page across boundary_pfn:
355 * 1. isolate before boundary_pfn: the page after is not online
356 * 2. isolate after boundary_pfn: the page before is not online
372 struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);
374 VM_BUG_ON(!page);
375 pfn = page_to_pfn(page);
377 if (PageUnaccepted(page)) {
382 if (PageBuddy(page)) {
383 int order = buddy_order(page);
393 * If a compound page is straddling our block, attempt
397 * free page that straddles into our block: gigantic
405 if (PageCompound(page)) {
406 struct page *head = compound_head(page);
411 PageHuge(page)) {
423 VM_WARN_ON_ONCE_PAGE(PageLRU(page), page);
424 VM_WARN_ON_ONCE_PAGE(page_has_movable_ops(page), page);
440 * start_isolate_page_range() - mark page range MIGRATE_ISOLATE
445 * Setting the page allocation type to MIGRATE_ISOLATE means free pages in
464 * Please note that there is no strong synchronization with the page allocator
465 * either. Pages might be freed while their page blocks are marked ISOLATED.
479 struct page *page;
480 /* isolation is done at page block granularity */
506 page = __first_valid_page(pfn, pageblock_nr_pages);
507 if (page && set_migratetype_isolate(page, mode, start_pfn,
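
Since isolation is done at pageblock granularity (see the comment at line 480), start_isolate_page_range() effectively rounds the requested range out to pageblock boundaries and handles one pageblock per iteration. A standalone model of that walk, again assuming 512-page pageblocks:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* assumed pageblock size */

int main(void)
{
	unsigned long start_pfn = 700, end_pfn = 2000;

	/* Round outward to pageblock boundaries, then visit one
	 * pageblock per step -- the granularity of MIGRATE_ISOLATE. */
	unsigned long first = start_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	unsigned long last = (end_pfn + PAGEBLOCK_NR_PAGES - 1) &
			     ~(PAGEBLOCK_NR_PAGES - 1);

	for (unsigned long pfn = first; pfn < last; pfn += PAGEBLOCK_NR_PAGES)
		printf("isolate pageblock at pfn %lu\n", pfn);
	return 0;
}
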
523 * This finds and unsets every MIGRATE_ISOLATE page block in the given range
528 struct page *page;
535 page = __first_valid_page(pfn, pageblock_nr_pages);
536 if (!page || !is_migrate_isolate_page(page))
538 unset_migratetype_isolate(page);
552 struct page *page;
555 page = pfn_to_page(pfn);
556 if (PageBuddy(page))
558 * If the page is on a free list, it has to be on
562 pfn += 1 << buddy_order(page);
564 PageHWPoison(page))
565 /* A HWPoisoned page cannot also be PageBuddy */
568 PageOffline(page) && !page_count(page))
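
The scan in the fragment above decides whether a range is fully isolated: free buddy blocks are skipped whole, while hwpoisoned pages and reference-free PageOffline pages are tolerated because they can never be allocated. A userspace model of that decision (the state_of()/order_of() helpers are hypothetical stand-ins for the PageBuddy()/PageHWPoison()/PageOffline() tests):

#include <stdio.h>
#include <stdbool.h>

enum state { FREE_BUDDY, HWPOISON, OFFLINE_NOREF, IN_USE };

/* Hypothetical layout: one order-4 free block, then hwpoisoned pages. */
static enum state state_of(unsigned long pfn)
{
	return pfn < 16 ? FREE_BUDDY : HWPOISON;
}
static unsigned int order_of(unsigned long pfn) { return 4; }

/* The range counts as isolated only if every page is free,
 * hwpoisoned, or offline with a zero refcount. */
static bool range_isolated(unsigned long start, unsigned long end)
{
	unsigned long pfn = start;

	while (pfn < end) {
		switch (state_of(pfn)) {
		case FREE_BUDDY:
			pfn += 1UL << order_of(pfn);	/* skip free block */
			break;
		case HWPOISON:		/* unusable, so harmless here */
		case OFFLINE_NOREF:	/* offline, refcount 0 */
			pfn++;
			break;
		default:
			return false;	/* still in use: not isolated */
		}
	}
	return true;
}

int main(void)
{
	printf("isolated: %s\n", range_isolated(0, 32) ? "yes" : "no");
	return 0;
}
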
601 struct page *page;
621 page = __first_valid_page(pfn, pageblock_nr_pages);
622 if (page && !is_migrate_isolate_page(page))
625 page = __first_valid_page(start_pfn, end_pfn - start_pfn);
626 if ((pfn < end_pfn) || !page) {
632 zone = page_zone(page);
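
Taken together, these entry points form the usual isolate / migrate / verify / undo sequence that callers such as memory offlining and alloc_contig_range() follow. A userspace sketch of just the control flow, with stubs standing in for the real start_isolate_page_range(), test_pages_isolated() and undo_isolate_page_range() (the stubs model the contract, not the implementations):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stubs; the real functions live in mm/page_isolation.c. */
static int start_isolate(unsigned long s, unsigned long e) { return 0; }
static bool test_isolated(unsigned long s, unsigned long e) { return true; }
static void undo_isolate(unsigned long s, unsigned long e) { }

int main(void)
{
	unsigned long start_pfn = 512, end_pfn = 1024;

	/* 1. Mark the pageblocks MIGRATE_ISOLATE; fails if an
	 *    unmovable page is found in the range. */
	if (start_isolate(start_pfn, end_pfn))
		return 1;

	/* 2. The caller migrates or reclaims the remaining in-use
	 *    pages here. */

	/* 3. Verify nothing allocatable is left in the range. */
	if (!test_isolated(start_pfn, end_pfn)) {
		undo_isolate(start_pfn, end_pfn);
		return 1;
	}

	/* 4. Use the range (offline it, hand it out contiguously),
	 *    then drop the isolation. */
	undo_isolate(start_pfn, end_pfn);
	printf("range [%lu, %lu) done\n", start_pfn, end_pfn);
	return 0;
}
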