Lines matching full:zone (free-text search hits; the code below comes from the kernel's page allocator, mm/page_alloc.c)

13  *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
82 * shuffle the whole zone).
94 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
281 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
293 static bool cond_accept_memory(struct zone *zone, unsigned int order,
319 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
321 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
329 static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
434 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
442 seq = zone_span_seqbegin(zone); in page_outside_zone_boundaries()
443 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
444 sp = zone->spanned_pages; in page_outside_zone_boundaries()
445 ret = !zone_spans_pfn(zone, pfn); in page_outside_zone_boundaries()
446 } while (zone_span_seqretry(zone, seq)); in page_outside_zone_boundaries()
449 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
450 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
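
The page_outside_zone_boundaries() hits above sample zone->zone_start_pfn and zone->spanned_pages inside a sequence-counter retry loop, so a concurrent zone resize (memory hotplug) simply forces a re-read instead of requiring a lock on this debug path. A minimal userspace sketch of that read-retry shape, using C11 atomics in place of the kernel's zone_span_seqbegin()/zone_span_seqretry(); struct span and every name below are illustrative, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy analogue of a zone span guarded by a sequence counter. */
struct span {
	atomic_uint seq;		/* even: stable, odd: writer active */
	unsigned long start_pfn;
	unsigned long nr_pages;
};

static unsigned read_begin(struct span *s)
{
	unsigned seq;

	while ((seq = atomic_load_explicit(&s->seq, memory_order_acquire)) & 1)
		;			/* writer in progress, spin */
	return seq;
}

static bool read_retry(struct span *s, unsigned seq)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->seq, memory_order_relaxed) != seq;
}

/* Same do/while shape as the kernel function: re-read if the span moved. */
static bool pfn_outside_span(struct span *s, unsigned long pfn)
{
	unsigned long start, nr;
	unsigned seq;
	bool outside;

	do {
		seq = read_begin(s);
		start = s->start_pfn;
		nr = s->nr_pages;
		outside = pfn < start || pfn >= start + nr;
	} while (read_retry(s, seq));

	return outside;
}

int main(void)
{
	struct span s = { .seq = 0, .start_pfn = 0x1000, .nr_pages = 0x800 };

	printf("%d %d\n", pfn_outside_span(&s, 0x1200),
	       pfn_outside_span(&s, 0x2000));
	return 0;
}
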
457 * Temporary debugging check for pages not lying within a given zone.
459 static bool __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
461 if (page_outside_zone_boundaries(zone, page)) in bad_range()
463 if (zone != page_zone(page)) in bad_range()
469 static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
589 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
596 capc->cc->zone == zone ? capc : NULL; in task_capc()
631 static inline struct capture_control *task_capc(struct zone *zone) in task_capc() argument
644 static inline void account_freepages(struct zone *zone, int nr_pages, in account_freepages() argument
647 lockdep_assert_held(&zone->lock); in account_freepages()
652 __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); in account_freepages()
655 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); in account_freepages()
657 WRITE_ONCE(zone->nr_free_highatomic, in account_freepages()
658 zone->nr_free_highatomic + nr_pages); in account_freepages()
662 static inline void __add_to_free_list(struct page *page, struct zone *zone, in __add_to_free_list() argument
666 struct free_area *area = &zone->free_area[order]; in __add_to_free_list()
680 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); in __add_to_free_list()
688 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
691 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
701 account_freepages(zone, -nr_pages, old_mt); in move_to_free_list()
702 account_freepages(zone, nr_pages, new_mt); in move_to_free_list()
708 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages); in move_to_free_list()
712 static inline void __del_page_from_free_list(struct page *page, struct zone *zone, in __del_page_from_free_list() argument
728 zone->free_area[order].nr_free--; in __del_page_from_free_list()
731 __mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages); in __del_page_from_free_list()
734 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
737 __del_page_from_free_list(page, zone, order, migratetype); in del_page_from_free_list()
738 account_freepages(zone, -(1 << order), migratetype); in del_page_from_free_list()
799 struct zone *zone, unsigned int order, in __free_one_page() argument
802 struct capture_control *capc = task_capc(zone); in __free_one_page()
808 VM_BUG_ON(!zone_is_initialized(zone)); in __free_one_page()
813 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
815 account_freepages(zone, 1 << order, migratetype); in __free_one_page()
821 account_freepages(zone, -(1 << order), migratetype); in __free_one_page()
849 clear_page_guard(zone, buddy, order); in __free_one_page()
851 __del_page_from_free_list(buddy, zone, order, buddy_mt); in __free_one_page()
878 __add_to_free_list(page, zone, order, migratetype, to_tail); in __free_one_page()
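
The __free_one_page() hits are the buddy-merge loop on the free path: a freed 2^order block is coalesced with its free buddy and pushed up an order, repeatedly. What makes the buddy lookup O(1) is a single XOR on the page frame number. A small standalone sketch of just that arithmetic (plain userspace C, no kernel structures; the pfn is made up):

#include <stdio.h>

/* For a 2^order-page block at 'pfn', the buddy is the same-size
 * neighbour differing only in bit 'order' of the pfn; merging the
 * pair yields a 2^(order+1) block at the lower of the two pfns. */
static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

static unsigned long merged_pfn(unsigned long pfn, unsigned int order)
{
	return pfn & ~(1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1230;
	unsigned int order = 0;

	/* Pretend every buddy we look at is free: keep merging upward,
	 * which is the shape of the loop in __free_one_page(). */
	while (order < 4) {
		unsigned long buddy = buddy_pfn(pfn, order);

		printf("order %u: block 0x%lx, buddy 0x%lx -> merge to 0x%lx\n",
		       order, pfn, buddy, merged_pfn(pfn, order));
		pfn = merged_pfn(pfn, order);
		order++;
	}
	return 0;
}
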
1293 * Assumes all pages on list are in same zone.
1296 static void free_pcppages_bulk(struct zone *zone, int count, in free_pcppages_bulk() argument
1313 spin_lock_irqsave(&zone->lock, flags); in free_pcppages_bulk()
1341 __free_one_page(page, pfn, zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1346 spin_unlock_irqrestore(&zone->lock, flags); in free_pcppages_bulk()
1350 static void split_large_buddy(struct zone *zone, struct page *page, in split_large_buddy() argument
1365 __free_one_page(page, pfn, zone, order, mt, fpi); in split_large_buddy()
1373 static void add_page_to_zone_llist(struct zone *zone, struct page *page, in add_page_to_zone_llist() argument
1379 llist_add(&page->pcp_llist, &zone->trylock_free_pages); in add_page_to_zone_llist()
1382 static void free_one_page(struct zone *zone, struct page *page, in free_one_page() argument
1390 if (!spin_trylock_irqsave(&zone->lock, flags)) { in free_one_page()
1391 add_page_to_zone_llist(zone, page, order); in free_one_page()
1395 spin_lock_irqsave(&zone->lock, flags); in free_one_page()
1399 llhead = &zone->trylock_free_pages; in free_one_page()
1408 split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags); in free_one_page()
1412 split_large_buddy(zone, page, pfn, order, fpi_flags); in free_one_page()
1413 spin_unlock_irqrestore(&zone->lock, flags); in free_one_page()
1422 struct zone *zone = page_zone(page); in __free_pages_ok() local
1425 free_one_page(zone, page, pfn, order, fpi_flags); in __free_pages_ok()
1478 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1485 * belong to a single zone. We assume that a border between node0 and node1
1501 unsigned long end_pfn, struct zone *zone) in __pageblock_pfn_to_page() argument
1516 if (page_zone(start_page) != zone) in __pageblock_pfn_to_page()
1542 static inline unsigned int expand(struct zone *zone, struct page *page, int low, in expand() argument
1551 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1559 if (set_page_guard(zone, &page[size], high)) in expand()
1562 __add_to_free_list(&page[size], zone, high, migratetype, false); in expand()
1570 static __always_inline void page_del_and_expand(struct zone *zone, in page_del_and_expand() argument
1576 __del_page_from_free_list(page, zone, high, migratetype); in page_del_and_expand()
1577 nr_pages -= expand(zone, page, low, high, migratetype); in page_del_and_expand()
1578 account_freepages(zone, -nr_pages, migratetype); in page_del_and_expand()
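
expand()/page_del_and_expand() are the inverse of the merge above: a larger free block is pulled off its list and split down to the requested order, returning the unused upper half to the free lists at each step. A simplified sketch of that split loop, with the free-list bookkeeping reduced to a printf:

#include <stdio.h>

/* Split a free block of order 'high' down to the requested order 'low',
 * handing back the upper half at each step -- the shape of expand(). */
static void split_down(unsigned long pfn, unsigned int high, unsigned int low)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		/* The upper half [pfn + size, pfn + 2*size) stays free at
		 * order 'high'; the kernel puts it on free_area[high]. */
		printf("free half at pfn 0x%lx, order %u\n", pfn + size, high);
	}
	printf("allocated block at pfn 0x%lx, order %u\n", pfn, low);
}

int main(void)
{
	split_down(0x4000, 4, 1);	/* found an order-4 block, wanted order-1 */
	return 0;
}
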
1734 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1743 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
1748 page_del_and_expand(zone, page, order, current_order, in __rmqueue_smallest()
1773 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
1776 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1779 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback() argument
1787 static int __move_freepages_block(struct zone *zone, unsigned long start_pfn, in __move_freepages_block() argument
1806 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in __move_freepages_block()
1807 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in __move_freepages_block()
1811 move_to_free_list(page, zone, order, old_mt, new_mt); in __move_freepages_block()
1822 static bool prep_move_freepages_block(struct zone *zone, struct page *page, in prep_move_freepages_block() argument
1833 * The caller only has the lock for @zone, don't touch ranges in prep_move_freepages_block()
1835 * the range that's inside the zone, this call is usually in prep_move_freepages_block()
1839 if (!zone_spans_pfn(zone, start)) in prep_move_freepages_block()
1841 if (!zone_spans_pfn(zone, end - 1)) in prep_move_freepages_block()
1872 static int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
1877 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) in move_freepages_block()
1880 return __move_freepages_block(zone, start_pfn, old_mt, new_mt); in move_freepages_block()
1910 * @zone: the zone
1927 bool move_freepages_block_isolate(struct zone *zone, struct page *page, in move_freepages_block_isolate() argument
1932 if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL)) in move_freepages_block_isolate()
1945 del_page_from_free_list(buddy, zone, order, in move_freepages_block_isolate()
1948 split_large_buddy(zone, buddy, pfn, order, FPI_NONE); in move_freepages_block_isolate()
1956 del_page_from_free_list(page, zone, order, in move_freepages_block_isolate()
1959 split_large_buddy(zone, page, pfn, order, FPI_NONE); in move_freepages_block_isolate()
1963 __move_freepages_block(zone, start_pfn, in move_freepages_block_isolate()
1981 static inline bool boost_watermark(struct zone *zone) in boost_watermark() argument
1993 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
1996 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
2012 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
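
boost_watermark() ratchets zone->watermark_boost up by one pageblock per fragmentation event and clamps it against a cap derived from the high watermark via mult_frac(), the overflow-safe multiply-by-a-fraction helper. A hedged sketch of both pieces; the 15000 boost factor below corresponds to the usual watermark_boost_factor default (150%), and all other numbers are invented:

#include <stdio.h>

/* Overflow-safer x * num / den, same idea as the kernel's mult_frac(). */
static unsigned long mult_frac(unsigned long x, unsigned long num,
			       unsigned long den)
{
	unsigned long q = x / den;
	unsigned long r = x % den;

	return q * num + r * num / den;
}

int main(void)
{
	unsigned long high_wmark = 128000;	/* pages, made up */
	unsigned long pageblock_nr_pages = 512;
	unsigned long boost_factor = 15000;	/* 150% of the high watermark */
	unsigned long boost = 0;

	unsigned long max_boost = mult_frac(high_wmark, boost_factor, 10000);
	if (max_boost < pageblock_nr_pages)
		max_boost = pageblock_nr_pages;

	/* Each fragmentation event nudges the boost up by one pageblock,
	 * clamped to max_boost, mirroring the min() in boost_watermark(). */
	for (int i = 0; i < 5; i++) {
		boost += pageblock_nr_pages;
		if (boost > max_boost)
			boost = max_boost;
		printf("boost after event %d: %lu pages\n", i + 1, boost);
	}
	return 0;
}
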
2100 try_to_claim_block(struct zone *zone, struct page *page, in try_to_claim_block() argument
2111 del_page_from_free_list(page, zone, current_order, block_type); in try_to_claim_block()
2113 nr_added = expand(zone, page, order, current_order, start_type); in try_to_claim_block()
2114 account_freepages(zone, nr_added, start_type); in try_to_claim_block()
2123 if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD)) in try_to_claim_block()
2124 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in try_to_claim_block()
2126 /* moving whole block can fail due to zone boundary conditions */ in try_to_claim_block()
2127 if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages, in try_to_claim_block()
2158 __move_freepages_block(zone, start_pfn, block_type, start_type); in try_to_claim_block()
2159 return __rmqueue_smallest(zone, order, start_type); in try_to_claim_block()
2174 __rmqueue_claim(struct zone *zone, int order, int start_migratetype, in __rmqueue_claim() argument
2198 area = &(zone->free_area[current_order]); in __rmqueue_claim()
2211 page = try_to_claim_block(zone, page, current_order, order, in __rmqueue_claim()
2229 __rmqueue_steal(struct zone *zone, int order, int start_migratetype) in __rmqueue_steal() argument
2237 area = &(zone->free_area[current_order]); in __rmqueue_steal()
2244 page_del_and_expand(zone, page, order, current_order, fallback_mt); in __rmqueue_steal()
2262 * Call me with the zone->lock already held.
2265 __rmqueue(struct zone *zone, unsigned int order, int migratetype, in __rmqueue() argument
2273 * allocating from CMA when over half of the zone's free memory in __rmqueue()
2277 zone_page_state(zone, NR_FREE_CMA_PAGES) > in __rmqueue()
2278 zone_page_state(zone, NR_FREE_PAGES) / 2) { in __rmqueue()
2279 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2290 * a loop with the zone->lock held, meaning the freelists are in __rmqueue()
2296 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2302 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2310 page = __rmqueue_claim(zone, order, migratetype, alloc_flags); in __rmqueue()
2319 page = __rmqueue_steal(zone, order, migratetype); in __rmqueue()
2334 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2343 if (!spin_trylock_irqsave(&zone->lock, flags)) in rmqueue_bulk()
2346 spin_lock_irqsave(&zone->lock, flags); in rmqueue_bulk()
2349 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk()
2366 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_bulk()
2375 int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp) in decay_pcp_high() argument
2397 free_pcppages_bulk(zone, to_drain, pcp, 0); in decay_pcp_high()
2411 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) in drain_zone_pages() argument
2419 free_pcppages_bulk(zone, to_drain, pcp, 0); in drain_zone_pages()
2426 * Drain pcplists of the indicated processor and zone.
2428 static void drain_pages_zone(unsigned int cpu, struct zone *zone) in drain_pages_zone() argument
2430 struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in drain_pages_zone()
2440 free_pcppages_bulk(zone, to_drain, pcp, 0); in drain_pages_zone()
2452 struct zone *zone; in drain_pages() local
2454 for_each_populated_zone(zone) { in drain_pages()
2455 drain_pages_zone(cpu, zone); in drain_pages()
2462 void drain_local_pages(struct zone *zone) in drain_local_pages() argument
2466 if (zone) in drain_local_pages()
2467 drain_pages_zone(cpu, zone); in drain_local_pages()
2482 static void __drain_all_pages(struct zone *zone, bool force_all_cpus) in __drain_all_pages() argument
2494 * a zone. Such callers are primarily CMA and memory hotplug and need in __drain_all_pages()
2498 if (!zone) in __drain_all_pages()
2511 struct zone *z; in __drain_all_pages()
2520 } else if (zone) { in __drain_all_pages()
2521 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __drain_all_pages()
2541 if (zone) in __drain_all_pages()
2542 drain_pages_zone(cpu, zone); in __drain_all_pages()
2553 * When zone parameter is non-NULL, spill just the single zone's pages.
2555 void drain_all_pages(struct zone *zone) in drain_all_pages() argument
2557 __drain_all_pages(zone, false); in drain_all_pages()
2578 * freed pages to reduce zone lock contention. in nr_pcp_free()
2585 static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, in nr_pcp_high() argument
2607 if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) { in nr_pcp_high()
2617 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) { in nr_pcp_high()
2633 static void free_frozen_page_commit(struct zone *zone, in free_frozen_page_commit() argument
2673 * Do not attempt to take a zone lock. Let pcp->count get in free_frozen_page_commit()
2678 high = nr_pcp_high(pcp, zone, batch, free_high); in free_frozen_page_commit()
2680 free_pcppages_bulk(zone, nr_pcp_free(pcp, batch, high, free_high), in free_frozen_page_commit()
2682 if (test_bit(ZONE_BELOW_HIGH, &zone->flags) && in free_frozen_page_commit()
2683 zone_watermark_ok(zone, 0, high_wmark_pages(zone), in free_frozen_page_commit()
2685 clear_bit(ZONE_BELOW_HIGH, &zone->flags); in free_frozen_page_commit()
2697 struct zone *zone; in __free_frozen_pages() local
2716 zone = page_zone(page); in __free_frozen_pages()
2720 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
2728 add_page_to_zone_llist(zone, page, order); in __free_frozen_pages()
2732 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in __free_frozen_pages()
2734 free_frozen_page_commit(zone, pcp, page, migratetype, order, fpi_flags); in __free_frozen_pages()
2737 free_one_page(zone, page, pfn, order, fpi_flags); in __free_frozen_pages()
2754 struct zone *locked_zone = NULL; in free_unref_folios()
2783 struct zone *zone = folio_zone(folio); in free_unref_folios() local
2791 /* Different zone requires a different pcp lock */ in free_unref_folios()
2792 if (zone != locked_zone || in free_unref_folios()
2806 free_one_page(zone, &folio->page, pfn, in free_unref_folios()
2816 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in free_unref_folios()
2819 free_one_page(zone, &folio->page, pfn, in free_unref_folios()
2823 locked_zone = zone; in free_unref_folios()
2834 free_frozen_page_commit(zone, pcp, &folio->page, migratetype, in free_unref_folios()
2870 struct zone *zone = page_zone(page); in __isolate_free_page() local
2881 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
2882 if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) in __isolate_free_page()
2886 del_page_from_free_list(page, zone, order, mt); in __isolate_free_page()
2901 move_freepages_block(zone, page, mt, in __isolate_free_page()
2920 struct zone *zone = page_zone(page); in __putback_isolated_page() local
2922 /* zone lock should be held when this function is called */ in __putback_isolated_page()
2923 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
2926 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
2933 static inline void zone_statistics(struct zone *preferred_zone, struct zone *z, in zone_statistics()
2957 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, in rmqueue_buddy() argument
2967 if (!spin_trylock_irqsave(&zone->lock, flags)) in rmqueue_buddy()
2970 spin_lock_irqsave(&zone->lock, flags); in rmqueue_buddy()
2973 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2977 page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm); in rmqueue_buddy()
2986 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2989 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_buddy()
2993 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_buddy()
2997 zone_statistics(preferred_zone, zone, 1); in rmqueue_buddy()
3002 static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order) in nr_pcp_alloc() argument
3023 * zone. in nr_pcp_alloc()
3025 if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags)) in nr_pcp_alloc()
3054 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist() argument
3064 int batch = nr_pcp_alloc(pcp, zone, order); in __rmqueue_pcplist()
3067 alloced = rmqueue_bulk(zone, order, in __rmqueue_pcplist()
3085 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
3086 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
3096 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in rmqueue_pcplist()
3109 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
3114 zone_statistics(preferred_zone, zone, 1); in rmqueue_pcplist()
3120 * Allocate a page from the given zone.
3132 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
3133 struct zone *zone, unsigned int order, in rmqueue() argument
3140 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
3146 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
3152 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { in rmqueue()
3153 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
3154 wakeup_kswapd(zone, 0, 0, zone_idx(zone)); in rmqueue()
3157 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
3167 struct zone *zone) in reserve_highatomic_pageblock() argument
3174 * roughly 1% of a zone. But if 1% of a zone falls below a in reserve_highatomic_pageblock()
3178 if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages) in reserve_highatomic_pageblock()
3180 max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages); in reserve_highatomic_pageblock()
3181 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
3184 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
3187 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
3197 if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1) in reserve_highatomic_pageblock()
3199 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
3202 zone->nr_reserved_highatomic += 1 << order; in reserve_highatomic_pageblock()
3206 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
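
reserve_highatomic_pageblock() caps the highatomic reserve at roughly 1% of the zone, rounded up to whole pageblocks, and zones whose 1% is smaller than a single pageblock reserve nothing at all. A tiny sketch of that sizing rule; the zone size is made up and ALIGN_UP stands in for the kernel's ALIGN:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long managed = 1000000;	/* pages in a made-up zone */
	unsigned long pageblock_nr_pages = 512;

	/* Zones where 1% is smaller than one pageblock keep no reserve. */
	if (managed / 100 < pageblock_nr_pages) {
		printf("zone too small, no highatomic reserve\n");
		return 0;
	}
	printf("max highatomic reserve: %lu pages\n",
	       ALIGN_UP(managed / 100, pageblock_nr_pages));
	return 0;
}
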
3224 struct zone *zone; in unreserve_highatomic_pageblock() local
3229 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
3235 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
3239 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
3241 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
3256 if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic)) in unreserve_highatomic_pageblock()
3257 size = zone->nr_reserved_highatomic; in unreserve_highatomic_pageblock()
3258 zone->nr_reserved_highatomic -= size; in unreserve_highatomic_pageblock()
3270 ret = move_freepages_block(zone, page, in unreserve_highatomic_pageblock()
3274 move_to_free_list(page, zone, order, in unreserve_highatomic_pageblock()
3283 * so this should not fail on zone boundaries. in unreserve_highatomic_pageblock()
3287 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
3291 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
3297 static inline long __zone_watermark_unusable_free(struct zone *z, in __zone_watermark_unusable_free()
3321 * one free page of a suitable size. Checking now avoids taking the zone lock
3324 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok()
3402 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok()
3409 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast()
3457 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3459 return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <= in zone_allows_reclaim()
3463 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone) in zone_allows_reclaim() argument
3470 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
3471 * fragmentation is subtle. If the preferred zone was HIGHMEM then
3472 * premature use of a lower zone may cause lowmem pressure problems that
3473 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
3478 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3494 if (!zone) in alloc_flags_nofragment()
3497 if (zone_idx(zone) != ZONE_NORMAL) in alloc_flags_nofragment()
3502 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3506 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3534 struct zone *zone; in get_page_from_freelist() local
3541 * Scan zonelist, looking for a zone with enough free. in get_page_from_freelist()
3546 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3553 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
3575 if (last_pgdat != zone->zone_pgdat) { in get_page_from_freelist()
3576 last_pgdat = zone->zone_pgdat; in get_page_from_freelist()
3577 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); in get_page_from_freelist()
3585 zone != zonelist_zone(ac->preferred_zoneref)) { in get_page_from_freelist()
3594 if (zone_to_nid(zone) != local_nid) { in get_page_from_freelist()
3600 cond_accept_memory(zone, order, alloc_flags); in get_page_from_freelist()
3609 if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) in get_page_from_freelist()
3612 mark = high_wmark_pages(zone); in get_page_from_freelist()
3613 if (zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3618 set_bit(ZONE_BELOW_HIGH, &zone->flags); in get_page_from_freelist()
3621 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); in get_page_from_freelist()
3622 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3627 if (cond_accept_memory(zone, order, alloc_flags)) in get_page_from_freelist()
3631 * Watermark failed for this zone, but see if we can in get_page_from_freelist()
3632 * grow this zone if it contains deferred pages. in get_page_from_freelist()
3635 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3644 !zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone)) in get_page_from_freelist()
3647 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3657 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3666 page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order, in get_page_from_freelist()
3676 reserve_highatomic_pageblock(page, order, zone); in get_page_from_freelist()
3680 if (cond_accept_memory(zone, order, alloc_flags)) in get_page_from_freelist()
3683 /* Try again if zone has deferred pages */ in get_page_from_freelist()
3685 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3887 * At least in one zone compaction wasn't deferred or skipped, so let's in __alloc_pages_direct_compact()
3901 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact() local
3903 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
3904 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
4001 struct zone *zone; in should_compact_retry() local
4013 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
4015 if (zone_watermark_ok(zone, 0, min_wmark_pages(zone), in should_compact_retry()
4171 struct zone *zone; in wake_all_kswapds() local
4181 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
4183 if (!managed_zone(zone)) in wake_all_kswapds()
4185 if (last_pgdat == zone->zone_pgdat) in wake_all_kswapds()
4187 wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx); in wake_all_kswapds()
4188 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
4301 struct zone *zone; in should_reclaim_retry() local
4325 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
4329 unsigned long min_wmark = min_wmark_pages(zone); in should_reclaim_retry()
4334 !__cpuset_zone_allowed(zone, gfp_mask)) in should_reclaim_retry()
4337 available = reclaimable = zone_reclaimable_pages(zone); in should_reclaim_retry()
4338 available += zone_page_state_snapshot(zone, NR_FREE_PAGES); in should_reclaim_retry()
4344 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
4472 * any suitable zone to satisfy the request - e.g. non-movable in __alloc_pages_slowpath()
4739 /* Dirty zone balancing only done in the fast path */ in prepare_alloc_pages()
4743 * The preferred zone is used for statistics but crucially it is in prepare_alloc_pages()
4775 struct zone *zone; in alloc_pages_bulk_noprof() local
4826 /* Find an allowed local zone that meets the low watermark. */ in alloc_pages_bulk_noprof()
4828 for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) { in alloc_pages_bulk_noprof()
4832 !__cpuset_zone_allowed(zone, gfp)) { in alloc_pages_bulk_noprof()
4836 if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) && in alloc_pages_bulk_noprof()
4837 zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) { in alloc_pages_bulk_noprof()
4841 cond_accept_memory(zone, 0, alloc_flags); in alloc_pages_bulk_noprof()
4843 mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages; in alloc_pages_bulk_noprof()
4844 if (zone_watermark_fast(zone, 0, mark, in alloc_pages_bulk_noprof()
4850 if (cond_accept_memory(zone, 0, alloc_flags)) in alloc_pages_bulk_noprof()
4853 /* Try again if zone has deferred pages */ in alloc_pages_bulk_noprof()
4855 if (_deferred_grow_zone(zone, 0)) in alloc_pages_bulk_noprof()
4864 if (unlikely(!zone)) in alloc_pages_bulk_noprof()
4869 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in alloc_pages_bulk_noprof()
4883 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, in alloc_pages_bulk_noprof()
4903 __count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account); in alloc_pages_bulk_noprof()
4904 zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account); in alloc_pages_bulk_noprof()
5190 * @offset: The zone index of the highest zone
5193 * high watermark within all zones at or below a given zone index. For each
5194 * zone, the number of pages is calculated as:
5203 struct zone *zone; in nr_free_zone_pages() local
5210 for_each_zone_zonelist(zone, z, zonelist, offset) { in nr_free_zone_pages()
5211 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5212 unsigned long high = high_wmark_pages(zone); in nr_free_zone_pages()
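
nr_free_zone_pages() walks the zonelist up to the given index and, for each zone, counts only the managed pages sitting above the high watermark. A standalone sketch of that sum with invented per-zone numbers:

#include <stdio.h>

struct zone_info {
	const char *name;
	unsigned long managed;	/* zone_managed_pages() */
	unsigned long high;	/* high_wmark_pages() */
};

int main(void)
{
	/* Made-up zones; the real walk uses the node's zonelist. */
	struct zone_info zones[] = {
		{ "DMA32",  262144,  1200 },
		{ "Normal", 1966080, 8200 },
	};
	unsigned long sum = 0;

	for (unsigned i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		unsigned long size = zones[i].managed;
		unsigned long high = zones[i].high;

		/* Count only what sits above the high watermark. */
		if (size > high)
			sum += size - high;
	}
	printf("nr_free_zone_pages-style sum: %lu pages\n", sum);
	return 0;
}
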
5235 static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) in zoneref_set_zone() argument
5237 zoneref->zone = zone; in zoneref_set_zone()
5238 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
5242 * Builds allocation fallback zone lists.
5248 struct zone *zone; in build_zonerefs_node() local
5254 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5255 if (populated_zone(zone)) { in build_zonerefs_node()
5256 zoneref_set_zone(zone, &zonerefs[nr_zones++]); in build_zonerefs_node()
5361 * This results in maximum locality--normal zone overflows into local
5362 * DMA zone, if any--but risks exhausting DMA zone.
5380 zonerefs->zone = NULL; in build_zonelists_in_node_order()
5395 zonerefs->zone = NULL; in build_thisnode_zonelists()
5436 * I.e., first node id of first zone in arg node's generic zonelist.
5464 zonerefs->zone = NULL; in build_zonelists()
5483 * Other parts of the kernel may not check if the zone is available.
5536 * i.e., the node of the first zone in the generic zonelist. in __build_all_zonelists()
5561 * each zone will be allocated later when the per cpu in build_all_zonelists_init()
5599 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
5613 pr_info("Policy zone: %s\n", zone_names[policy_zone]); in build_all_zonelists()
5617 static int zone_batchsize(struct zone *zone) in zone_batchsize() argument
5624 * of the zone or 1MB, whichever is smaller. The batch in zone_batchsize()
5626 * and zone lock contention. in zone_batchsize()
5628 batch = min(zone_managed_pages(zone) >> 10, SZ_1M / PAGE_SIZE); in zone_batchsize()
5666 static int zone_highsize(struct zone *zone, int batch, int cpu_online, in zone_highsize() argument
5676 * By default, the high value of the pcp is based on the zone in zone_highsize()
5680 total_pages = low_wmark_pages(zone); in zone_highsize()
5685 * zone. in zone_highsize()
5687 total_pages = zone_managed_pages(zone) / high_fraction; in zone_highsize()
5691 * Split the high value across all online CPUs local to the zone. Note in zone_highsize()
5698 nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online; in zone_highsize()
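
zone_batchsize() and zone_highsize() size the per-CPU pagesets: the batch is about 0.1% of the zone capped at 1MB worth of pages, and the default high limit spreads roughly the low watermark's worth of pages across the CPUs local to the zone. A rough sketch with made-up numbers; the additional rounding and clamping done by the real code is skipped here:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SZ_1M		(1024UL * 1024)

int main(void)
{
	unsigned long managed = 1966080;	/* pages, made-up zone */
	unsigned long low_wmark = 8000;		/* pages, made up */
	unsigned int nr_local_cpus = 8;

	/* pcp->batch: about 0.1% of the zone, capped at 1MB worth of
	 * pages (further rounding in the real code is skipped). */
	unsigned long batch = managed >> 10;
	if (batch > SZ_1M / PAGE_SIZE)
		batch = SZ_1M / PAGE_SIZE;

	/* Default pcp->high: roughly the low watermark's worth of pages,
	 * split across the CPUs local to the zone's node. */
	unsigned long high = low_wmark / nr_local_cpus;

	printf("per-CPU pageset: batch=%lu high=%lu\n", batch, high);
	return 0;
}
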
5763 static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min, in __zone_set_pageset_high_and_batch() argument
5770 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __zone_set_pageset_high_and_batch()
5777 * zone based on the zone's size.
5779 static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online) in zone_set_pageset_high_and_batch() argument
5783 new_batch = max(1, zone_batchsize(zone)); in zone_set_pageset_high_and_batch()
5785 new_high_min = zone_highsize(zone, new_batch, cpu_online, in zone_set_pageset_high_and_batch()
5793 new_high_min = zone_highsize(zone, new_batch, cpu_online, 0); in zone_set_pageset_high_and_batch()
5794 new_high_max = zone_highsize(zone, new_batch, cpu_online, in zone_set_pageset_high_and_batch()
5798 if (zone->pageset_high_min == new_high_min && in zone_set_pageset_high_and_batch()
5799 zone->pageset_high_max == new_high_max && in zone_set_pageset_high_and_batch()
5800 zone->pageset_batch == new_batch) in zone_set_pageset_high_and_batch()
5803 zone->pageset_high_min = new_high_min; in zone_set_pageset_high_and_batch()
5804 zone->pageset_high_max = new_high_max; in zone_set_pageset_high_and_batch()
5805 zone->pageset_batch = new_batch; in zone_set_pageset_high_and_batch()
5807 __zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max, in zone_set_pageset_high_and_batch()
5811 void __meminit setup_zone_pageset(struct zone *zone) in setup_zone_pageset() argument
5817 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); in setup_zone_pageset()
5819 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); in setup_zone_pageset()
5824 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in setup_zone_pageset()
5825 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in setup_zone_pageset()
5829 zone_set_pageset_high_and_batch(zone, 0); in setup_zone_pageset()
5833 * The zone indicated has a new number of managed_pages; batch sizes and percpu
5836 static void zone_pcp_update(struct zone *zone, int cpu_online) in zone_pcp_update() argument
5839 zone_set_pageset_high_and_batch(zone, cpu_online); in zone_pcp_update()
5843 static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu) in zone_pcp_update_cacheinfo() argument
5848 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in zone_pcp_update_cacheinfo()
5854 * This can reduce zone lock contention without hurting in zone_pcp_update_cacheinfo()
5867 struct zone *zone; in setup_pcp_cacheinfo() local
5869 for_each_populated_zone(zone) in setup_pcp_cacheinfo()
5870 zone_pcp_update_cacheinfo(zone, cpu); in setup_pcp_cacheinfo()
5880 struct zone *zone; in setup_per_cpu_pageset() local
5883 for_each_populated_zone(zone) in setup_per_cpu_pageset()
5884 setup_zone_pageset(zone); in setup_per_cpu_pageset()
5905 __meminit void zone_pcp_init(struct zone *zone) in zone_pcp_init() argument
5912 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_init()
5913 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_init()
5914 zone->pageset_high_min = BOOT_PAGESET_HIGH; in zone_pcp_init()
5915 zone->pageset_high_max = BOOT_PAGESET_HIGH; in zone_pcp_init()
5916 zone->pageset_batch = BOOT_PAGESET_BATCH; in zone_pcp_init()
5918 if (populated_zone(zone)) in zone_pcp_init()
5919 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, in zone_pcp_init()
5920 zone->present_pages, zone_batchsize(zone)); in zone_pcp_init()
5981 struct zone *zone; in page_alloc_cpu_dead() local
6004 for_each_populated_zone(zone) in page_alloc_cpu_dead()
6005 zone_pcp_update(zone, 0); in page_alloc_cpu_dead()
6012 struct zone *zone; in page_alloc_cpu_online() local
6014 for_each_populated_zone(zone) in page_alloc_cpu_online()
6015 zone_pcp_update(zone, 1); in page_alloc_cpu_online()
6045 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages() local
6047 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
6049 /* Find valid and maximum lowmem_reserve in the zone */ in calculate_totalreserve_pages()
6051 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
6052 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
6056 max += high_wmark_pages(zone); in calculate_totalreserve_pages()
6072 * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
6074 * pages are left in the zone after a successful __alloc_pages().
6083 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve() local
6085 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
6089 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
6094 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
6096 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
6097 trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone, in setup_per_zone_lowmem_reserve()
6098 zone->lowmem_reserve[j]); in setup_per_zone_lowmem_reserve()
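
setup_per_zone_lowmem_reserve() gives each lower zone a reserve against every higher zone: the managed pages of the zones above are accumulated and divided by that zone's lowmem_reserve ratio entry. A sketch of the nested loop with an invented three-zone node; the ratio values are only placeholders in the style of sysctl_lowmem_reserve_ratio:

#include <stdio.h>

int main(void)
{
	/* Made-up node, zones ordered low to high. */
	const char *names[] = { "DMA32", "Normal", "Movable" };
	unsigned long managed[] = { 262144, 1966080, 0 };
	unsigned long ratio[] = { 256, 32, 0 };
	unsigned long reserve[3][3] = {{ 0 }};

	for (int i = 0; i < 3; i++) {
		unsigned long upper = 0;

		for (int j = i + 1; j < 3; j++) {
			upper += managed[j];
			/* Pages zone i keeps back from allocations that
			 * could have been satisfied from zone j instead. */
			if (ratio[i])
				reserve[i][j] = upper / ratio[i];
			printf("%s reserve vs %s: %lu pages\n",
			       names[i], names[j], reserve[i][j]);
		}
	}
	return 0;
}
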
6111 struct zone *zone; in __setup_per_zone_wmarks() local
6115 for_each_zone(zone) { in __setup_per_zone_wmarks()
6116 if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE) in __setup_per_zone_wmarks()
6117 lowmem_pages += zone_managed_pages(zone); in __setup_per_zone_wmarks()
6120 for_each_zone(zone) { in __setup_per_zone_wmarks()
6123 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
6124 tmp = (u64)pages_min * zone_managed_pages(zone); in __setup_per_zone_wmarks()
6126 if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) { in __setup_per_zone_wmarks()
6138 min_pages = zone_managed_pages(zone) / 1024; in __setup_per_zone_wmarks()
6140 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
6143 * If it's a lowmem zone, reserve a number of pages in __setup_per_zone_wmarks()
6144 * proportionate to the zone's size. in __setup_per_zone_wmarks()
6146 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
6155 mult_frac(zone_managed_pages(zone), in __setup_per_zone_wmarks()
6158 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
6159 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
6160 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
6161 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
6162 trace_mm_setup_per_zone_wmarks(zone); in __setup_per_zone_wmarks()
6164 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
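
__setup_per_zone_wmarks() hands each zone its share of min_free_kbytes in proportion to its size, then spaces the low, high, and promo watermarks above that by the larger of min/4 and managed * watermark_scale_factor / 10000. A sketch of that arithmetic; every input below is made up and the highmem/ZONE_MOVABLE special case is ignored:

#include <stdio.h>

int main(void)
{
	unsigned long pages_min = 16384;	/* min_free_kbytes expressed in pages */
	unsigned long lowmem_pages = 2228224;	/* sum of non-highmem managed pages */
	unsigned long managed = 1966080;	/* this zone's managed pages */
	unsigned long scale_factor = 10;	/* watermark_scale_factor */

	/* This zone's proportional share of min_free_kbytes. */
	unsigned long long tmp = (unsigned long long)pages_min * managed;
	unsigned long wmark_min = (unsigned long)(tmp / lowmem_pages);

	/* Spacing between watermarks: the larger of min/4 and
	 * managed * watermark_scale_factor / 10000. */
	unsigned long gap = (unsigned long)((unsigned long long)managed *
					    scale_factor / 10000);
	if (gap < wmark_min / 4)
		gap = wmark_min / 4;

	printf("min=%lu low=%lu high=%lu promo=%lu (pages)\n",
	       wmark_min, wmark_min + gap, wmark_min + 2 * gap,
	       wmark_min + 3 * gap);
	return 0;
}
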
6175 * Ensures that the watermark[min,low,high] values for each zone are set
6180 struct zone *zone; in setup_per_zone_wmarks() local
6191 for_each_zone(zone) in setup_per_zone_wmarks()
6192 zone_pcp_update(zone, 0); in setup_per_zone_wmarks()
6293 struct zone *zone; in setup_min_unmapped_ratio() local
6298 for_each_zone(zone) in setup_min_unmapped_ratio()
6299 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
6321 struct zone *zone; in setup_min_slab_ratio() local
6326 for_each_zone(zone) in setup_min_slab_ratio()
6327 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
6353 * if in function of the boot time zone sizes.
6372 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
6373 * cpu. It is the fraction of total pages in each zone that a hot per cpu
6379 struct zone *zone; in percpu_pagelist_high_fraction_sysctl_handler() local
6402 for_each_populated_zone(zone) in percpu_pagelist_high_fraction_sysctl_handler()
6403 zone_set_pageset_high_and_batch(zone, 0); in percpu_pagelist_high_fraction_sysctl_handler()
6509 * [start, end) must belong to a single zone.
6522 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
6551 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
6661 * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
6666 * belong to a single zone.
6685 .zone = page_zone(pfn_to_page(start)), in alloc_contig_range_noprof()
6722 drain_all_pages(cc.zone); in alloc_contig_range_noprof()
6762 * We don't have to hold zone->lock here because the pages are in alloc_contig_range_noprof()
6815 static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, in pfn_range_valid_contig()
6838 static bool zone_spans_last_pfn(const struct zone *zone, in zone_spans_last_pfn() argument
6843 return zone_spans_pfn(zone, last_pfn); in zone_spans_last_pfn()
6849 * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
6874 struct zone *zone; in alloc_contig_pages_noprof() local
6878 for_each_zone_zonelist_nodemask(zone, z, zonelist, in alloc_contig_pages_noprof()
6880 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages_noprof()
6882 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages_noprof()
6883 while (zone_spans_last_pfn(zone, pfn, nr_pages)) { in alloc_contig_pages_noprof()
6884 if (pfn_range_valid_contig(zone, pfn, nr_pages)) { in alloc_contig_pages_noprof()
6886 * We release the zone lock here because in alloc_contig_pages_noprof()
6887 * alloc_contig_range() will also lock the zone in alloc_contig_pages_noprof()
6892 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages_noprof()
6897 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages_noprof()
6901 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages_noprof()
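
alloc_contig_pages() scans each candidate zone in steps of the requested size, starting from the zone start pfn aligned up to nr_pages, and attempts alloc_contig_range() on each range that looks plausible. A minimal sketch of just the scan, with the validity check and allocation attempt replaced by a printf and all pfns invented:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned long zone_start = 0x10000, zone_end = 0x18000;	/* pfns */
	unsigned long nr_pages = 0x400;		/* requested contiguous run */

	/* Walk candidate ranges aligned to the request size, the way
	 * alloc_contig_pages() steps through a zone. */
	for (unsigned long pfn = ALIGN_UP(zone_start, nr_pages);
	     pfn + nr_pages <= zone_end; pfn += nr_pages)
		printf("candidate range [0x%lx, 0x%lx)\n", pfn, pfn + nr_pages);
	return 0;
}
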
6934 * Effectively disable pcplists for the zone by setting the high limit to 0
6941 void zone_pcp_disable(struct zone *zone) in zone_pcp_disable() argument
6944 __zone_set_pageset_high_and_batch(zone, 0, 0, 1); in zone_pcp_disable()
6945 __drain_all_pages(zone, true); in zone_pcp_disable()
6948 void zone_pcp_enable(struct zone *zone) in zone_pcp_enable() argument
6950 __zone_set_pageset_high_and_batch(zone, zone->pageset_high_min, in zone_pcp_enable()
6951 zone->pageset_high_max, zone->pageset_batch); in zone_pcp_enable()
6955 void zone_pcp_reset(struct zone *zone) in zone_pcp_reset() argument
6960 if (zone->per_cpu_pageset != &boot_pageset) { in zone_pcp_reset()
6962 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in zone_pcp_reset()
6963 drain_zonestat(zone, pzstats); in zone_pcp_reset()
6965 free_percpu(zone->per_cpu_pageset); in zone_pcp_reset()
6966 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_reset()
6967 if (zone->per_cpu_zonestats != &boot_zonestats) { in zone_pcp_reset()
6968 free_percpu(zone->per_cpu_zonestats); in zone_pcp_reset()
6969 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_reset()
6976 * All pages in the range must be in a single zone, must not contain holes,
6989 struct zone *zone; in __offline_isolated_pages() local
6993 zone = page_zone(pfn_to_page(pfn)); in __offline_isolated_pages()
6994 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
7021 del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); in __offline_isolated_pages()
7024 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
7031 * This function returns a stable result only if called under zone lock.
7051 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
7055 __add_to_free_list(page, zone, order, migratetype, tail); in add_to_free_list()
7056 account_freepages(zone, 1 << order, migratetype); in add_to_free_list()
7063 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
7081 if (set_page_guard(zone, current_buddy, high)) in break_down_buddy_pages()
7084 add_to_free_list(current_buddy, zone, high, migratetype, false); in break_down_buddy_pages()
7094 struct zone *zone = page_zone(page); in take_page_off_buddy() local
7100 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
7110 del_page_from_free_list(page_head, zone, page_order, in take_page_off_buddy()
7112 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
7121 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
7130 struct zone *zone = page_zone(page); in put_page_back_buddy() local
7134 spin_lock_irqsave(&zone->lock, flags); in put_page_back_buddy()
7140 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); in put_page_back_buddy()
7145 spin_unlock_irqrestore(&zone->lock, flags); in put_page_back_buddy()
7157 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma() local
7159 if (managed_zone(zone)) in has_managed_dma()
7191 static void __accept_page(struct zone *zone, unsigned long *flags, in __accept_page() argument
7195 account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); in __accept_page()
7196 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); in __accept_page()
7198 spin_unlock_irqrestore(&zone->lock, *flags); in __accept_page()
7207 struct zone *zone = page_zone(page); in accept_page() local
7210 spin_lock_irqsave(&zone->lock, flags); in accept_page()
7212 spin_unlock_irqrestore(&zone->lock, flags); in accept_page()
7216 /* Unlocks zone->lock */ in accept_page()
7217 __accept_page(zone, &flags, page); in accept_page()
7220 static bool try_to_accept_memory_one(struct zone *zone) in try_to_accept_memory_one() argument
7225 spin_lock_irqsave(&zone->lock, flags); in try_to_accept_memory_one()
7226 page = list_first_entry_or_null(&zone->unaccepted_pages, in try_to_accept_memory_one()
7229 spin_unlock_irqrestore(&zone->lock, flags); in try_to_accept_memory_one()
7233 /* Unlocks zone->lock */ in try_to_accept_memory_one()
7234 __accept_page(zone, &flags, page); in try_to_accept_memory_one()
7239 static bool cond_accept_memory(struct zone *zone, unsigned int order, in cond_accept_memory() argument
7245 if (list_empty(&zone->unaccepted_pages)) in cond_accept_memory()
7252 wmark = promo_wmark_pages(zone); in cond_accept_memory()
7260 return try_to_accept_memory_one(zone); in cond_accept_memory()
7264 (zone_page_state(zone, NR_FREE_PAGES) - in cond_accept_memory()
7265 __zone_watermark_unusable_free(zone, order, 0) - in cond_accept_memory()
7266 zone_page_state(zone, NR_UNACCEPTED)); in cond_accept_memory()
7269 if (!try_to_accept_memory_one(zone)) in cond_accept_memory()
7280 struct zone *zone = page_zone(page); in __free_unaccepted() local
7286 spin_lock_irqsave(&zone->lock, flags); in __free_unaccepted()
7287 list_add_tail(&page->lru, &zone->unaccepted_pages); in __free_unaccepted()
7288 account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); in __free_unaccepted()
7289 __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); in __free_unaccepted()
7291 spin_unlock_irqrestore(&zone->lock, flags); in __free_unaccepted()
7303 static bool cond_accept_memory(struct zone *zone, unsigned int order, in cond_accept_memory() argument
7388 * If it's empty attempt to spin_trylock zone->lock. in alloc_pages_nolock_noprof()