Lines matching full:zone in mm/memory_hotplug.c
158 * specifying a zone (MMOP_ONLINE)
160 * "contig-zones": keep zone contiguous
433 static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, in find_smallest_section_pfn() argument
444 if (zone != page_zone(pfn_to_page(start_pfn))) in find_smallest_section_pfn()
454 static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, in find_biggest_section_pfn() argument
469 if (zone != page_zone(pfn_to_page(pfn))) in find_biggest_section_pfn()
478 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn, in shrink_zone_span() argument
482 int nid = zone_to_nid(zone); in shrink_zone_span()
484 if (zone->zone_start_pfn == start_pfn) { in shrink_zone_span()
486 * If the section is the smallest section in the zone, we need to in shrink_zone_span()
487 * shrink zone->zone_start_pfn and zone->spanned_pages. in shrink_zone_span()
489 * for shrinking the zone. in shrink_zone_span()
491 pfn = find_smallest_section_pfn(nid, zone, end_pfn, in shrink_zone_span()
492 zone_end_pfn(zone)); in shrink_zone_span()
494 zone->spanned_pages = zone_end_pfn(zone) - pfn; in shrink_zone_span()
495 zone->zone_start_pfn = pfn; in shrink_zone_span()
497 zone->zone_start_pfn = 0; in shrink_zone_span()
498 zone->spanned_pages = 0; in shrink_zone_span()
500 } else if (zone_end_pfn(zone) == end_pfn) { in shrink_zone_span()
502 * If the section is the biggest section in the zone, we need to in shrink_zone_span()
503 * shrink zone->spanned_pages. in shrink_zone_span()
505 * shrinking the zone. in shrink_zone_span()
507 pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, in shrink_zone_span()
510 zone->spanned_pages = pfn - zone->zone_start_pfn + 1; in shrink_zone_span()
512 zone->zone_start_pfn = 0; in shrink_zone_span()
513 zone->spanned_pages = 0; in shrink_zone_span()
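
Taken together, the shrink_zone_span() hits describe two cases: the removed pfn range sits either at the start of the zone or at its end. A hedged reconstruction of the whole function follows; lines that did not match the search are filled in from context and may differ between kernel versions:

static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
{
        unsigned long pfn;
        int nid = zone_to_nid(zone);

        if (zone->zone_start_pfn == start_pfn) {
                /*
                 * The removed range starts the zone: find the next valid
                 * section and move zone_start_pfn up to it, or mark the
                 * zone empty if nothing remains.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
                                                zone_end_pfn(zone));
                if (pfn) {
                        zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
                } else {
                        zone->zone_start_pfn = 0;
                        zone->spanned_pages = 0;
                }
        } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * The removed range ends the zone: find the last valid
                 * section below it and trim spanned_pages, or mark the
                 * zone empty if nothing remains.
                 */
                pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
                        zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
                else {
                        zone->zone_start_pfn = 0;
                        zone->spanned_pages = 0;
                }
        }
}
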
521 struct zone *zone; in update_pgdat_span() local
523 for (zone = pgdat->node_zones; in update_pgdat_span()
524 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
525 unsigned long end_pfn = zone_end_pfn(zone); in update_pgdat_span()
528 if (!zone->spanned_pages) in update_pgdat_span()
531 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
538 if (zone->zone_start_pfn < node_start_pfn) in update_pgdat_span()
539 node_start_pfn = zone->zone_start_pfn; in update_pgdat_span()
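
update_pgdat_span() recomputes the node span from whichever zones still have spanned pages. A sketch of the complete loop; the node_end_pfn bookkeeping and the final pgdat updates are not among the hits and are filled in from the mainline layout, so details may vary:

static void update_pgdat_span(struct pglist_data *pgdat)
{
        unsigned long node_start_pfn = 0, node_end_pfn = 0;
        struct zone *zone;

        for (zone = pgdat->node_zones;
             zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
                unsigned long end_pfn = zone_end_pfn(zone);

                /* Empty zones contribute nothing to the node span. */
                if (!zone->spanned_pages)
                        continue;
                if (!node_end_pfn) {
                        node_start_pfn = zone->zone_start_pfn;
                        node_end_pfn = end_pfn;
                        continue;
                }

                if (end_pfn > node_end_pfn)
                        node_end_pfn = end_pfn;
                if (zone->zone_start_pfn < node_start_pfn)
                        node_start_pfn = zone->zone_start_pfn;
        }

        pgdat->node_start_pfn = node_start_pfn;
        pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
}
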
546 void remove_pfn_range_from_zone(struct zone *zone, in remove_pfn_range_from_zone() argument
551 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone()
566 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So in remove_pfn_range_from_zone()
570 if (zone_is_zone_device(zone)) in remove_pfn_range_from_zone()
573 clear_zone_contiguous(zone); in remove_pfn_range_from_zone()
575 shrink_zone_span(zone, start_pfn, start_pfn + nr_pages); in remove_pfn_range_from_zone()
578 set_zone_contiguous(zone); in remove_pfn_range_from_zone()
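
The remove_pfn_range_from_zone() hits give the ordering of the shrink path. Condensed into a sketch below; the memmap poisoning loop at the top of the real function is omitted, and the update_pgdat_span() call is assumed from the mainline layout:

void remove_pfn_range_from_zone(struct zone *zone, unsigned long start_pfn,
                                unsigned long nr_pages)
{
        struct pglist_data *pgdat = zone->zone_pgdat;

        /* (poisoning of the removed range's struct pages omitted here) */

        if (zone_is_zone_device(zone))
                return;         /* span shrinking cannot handle ZONE_DEVICE */

        clear_zone_contiguous(zone);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
        update_pgdat_span(pgdat);       /* keep the node span in sync with its zones */
        set_zone_contiguous(zone);
}
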
666 * zone ("present"). in online_pages_range()
704 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_online() argument
706 int nid = zone_to_nid(zone); in node_states_check_changes_online()
713 if (zone_idx(zone) <= ZONE_NORMAL && !node_state(nid, N_NORMAL_MEMORY)) in node_states_check_changes_online()
726 static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn, in resize_zone_range() argument
729 unsigned long old_end_pfn = zone_end_pfn(zone); in resize_zone_range()
731 if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn) in resize_zone_range()
732 zone->zone_start_pfn = start_pfn; in resize_zone_range()
734 zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn; in resize_zone_range()
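
A worked example of the resize above (values invented for illustration): if the zone currently spans pfns 0x10000-0x20000 and a range of 0x4000 pages starting at pfn 0x8000 is added, old_end_pfn is captured as 0x20000 first, zone_start_pfn drops to 0x8000, and spanned_pages becomes max(0x8000 + 0x4000, 0x20000) - 0x8000 = 0x18000.
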
763 * Associate the pfn range with the given zone, initializing the memmaps
764 * and resizing the pgdat/zone data to span the added pages. After this
769 * zone stats (e.g., nr_isolate_pageblock) are touched.
771 void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, in move_pfn_range_to_zone() argument
775 struct pglist_data *pgdat = zone->zone_pgdat; in move_pfn_range_to_zone()
778 clear_zone_contiguous(zone); in move_pfn_range_to_zone()
780 if (zone_is_empty(zone)) in move_pfn_range_to_zone()
781 init_currently_empty_zone(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
782 resize_zone_range(zone, start_pfn, nr_pages); in move_pfn_range_to_zone()
791 if (zone_is_zone_device(zone)) { in move_pfn_range_to_zone()
800 * with their zone properly. Not nice but set_pfnblock_flags_mask in move_pfn_range_to_zone()
801 * expects the zone to span the pfn range. All the pages in the range in move_pfn_range_to_zone()
804 memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0, in move_pfn_range_to_zone()
807 set_zone_contiguous(zone); in move_pfn_range_to_zone()
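
The doc comment at 763-769 and the hits below it outline move_pfn_range_to_zone(). A reconstruction with the gaps filled in; the pgdat resize, the ZONE_DEVICE section tainting, and the tail arguments of memmap_init_range() do not appear among the hits and are assumptions taken from the mainline version:

void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
                            unsigned long nr_pages,
                            struct vmem_altmap *altmap, int migratetype)
{
        struct pglist_data *pgdat = zone->zone_pgdat;
        int nid = pgdat->node_id;

        clear_zone_contiguous(zone);

        if (zone_is_empty(zone))
                init_currently_empty_zone(zone, start_pfn, nr_pages);
        resize_zone_range(zone, start_pfn, nr_pages);
        resize_pgdat_range(pgdat, start_pfn, nr_pages);

        /*
         * Remember when ZONE_DEVICE ranges share a section with other
         * zones, so pfn walkers can detect the mix.
         */
        if (zone_is_zone_device(zone)) {
                if (!IS_ALIGNED(start_pfn, PAGES_PER_SECTION))
                        section_taint_zone_device(start_pfn);
                if (!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION))
                        section_taint_zone_device(start_pfn + nr_pages);
        }

        /*
         * Initialize the struct pages now that the zone spans the range;
         * set_pfnblock_flags_mask() relies on that.
         */
        memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
                          MEMINIT_HOTPLUG, altmap, migratetype);

        set_zone_contiguous(zone);
}
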
816 struct zone *zone) in auto_movable_stats_account_zone() argument
818 if (zone_idx(zone) == ZONE_MOVABLE) { in auto_movable_stats_account_zone()
819 stats->movable_pages += zone->present_pages; in auto_movable_stats_account_zone()
821 stats->kernel_early_pages += zone->present_early_pages; in auto_movable_stats_account_zone()
827 stats->movable_pages += zone->cma_pages; in auto_movable_stats_account_zone()
828 stats->kernel_early_pages -= zone->cma_pages; in auto_movable_stats_account_zone()
853 * satisfy the configured zone ratio. in auto_movable_stats_account_group()
870 struct zone *zone; in auto_movable_can_online_movable() local
876 for_each_populated_zone(zone) in auto_movable_can_online_movable()
877 auto_movable_stats_account_zone(&stats, zone); in auto_movable_can_online_movable()
882 zone = pgdat->node_zones + i; in auto_movable_can_online_movable()
883 if (populated_zone(zone)) in auto_movable_can_online_movable()
884 auto_movable_stats_account_zone(&stats, zone); in auto_movable_can_online_movable()
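
The comment at 853 refers to the configured zone ratio. Stripped to its core, the decision auto_movable_can_online_movable() makes looks roughly like the sketch below; per-memory-group adjustments are dropped, and the helper name, the stats struct name, and the auto_movable_ratio percentage parameter are assumptions not visible in the hits:

/* Simplified sketch of the MOVABLE:KERNEL ratio check, not the full function. */
static bool can_online_movable_sketch(unsigned long nr_pages)
{
        struct auto_movable_stats stats = {};
        struct zone *zone;

        /* Sum up present MOVABLE pages and boot-time ("early") KERNEL pages. */
        for_each_populated_zone(zone)
                auto_movable_stats_account_zone(&stats, zone);

        /*
         * Online nr_pages to ZONE_MOVABLE only while the resulting
         * MOVABLE:KERNEL ratio stays within auto_movable_ratio percent.
         */
        return stats.movable_pages + nr_pages <=
               auto_movable_ratio * stats.kernel_early_pages / 100;
}
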
915 * Returns a default kernel memory zone for the given pfn range.
916 * If no kernel zone covers this pfn range it will automatically go
919 static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn, in default_kernel_zone_for_pfn()
926 struct zone *zone = &pgdat->node_zones[zid]; in default_kernel_zone_for_pfn() local
928 if (zone_intersects(zone, start_pfn, nr_pages)) in default_kernel_zone_for_pfn()
929 return zone; in default_kernel_zone_for_pfn()
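
The doc comment at 915-916 breaks off at the search boundary; the fallback it refers to is ZONE_NORMAL. A reconstruction of the function, with the loop bounds and the final return filled in from context:

static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
                                                unsigned long nr_pages)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        int zid;

        /* Prefer the lowest kernel zone that already intersects the range. */
        for (zid = 0; zid < ZONE_NORMAL; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (zone_intersects(zone, start_pfn, nr_pages))
                        return zone;
        }

        /* Otherwise fall back to ZONE_NORMAL. */
        return &pgdat->node_zones[ZONE_NORMAL];
}
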
936 * Determine to which zone to online memory dynamically based on user
945 * We don't allow for hotplugged memory in a KERNEL zone to increase the
956 * hotunplugging, as implemented in hypervisors, could result in zone
985 static struct zone *auto_movable_zone_for_pfn(int nid, in auto_movable_zone_for_pfn()
1010 * to the same zone, because dynamic memory groups only deal in auto_movable_zone_for_pfn()
1046 static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn, in default_zone_for_pfn()
1049 struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn, in default_zone_for_pfn()
1051 struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
1056 * We inherit the existing zone in a simple case where zones do not in default_zone_for_pfn()
1063 * If the range doesn't belong to any zone or two zones overlap in the in default_zone_for_pfn()
1064 * given range then we use movable zone only if movable_node is in default_zone_for_pfn()
1065 * enabled because we always online to a kernel zone by default. in default_zone_for_pfn()
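
The comments at 1056 and 1063-1065 describe the selection logic of default_zone_for_pfn(). A reconstruction of the body those comments belong to; the zone_intersects() checks and the XOR test are filled in from context and may differ by kernel version:

static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
                                                unsigned long nr_pages)
{
        struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
                                                               nr_pages);
        struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
        bool in_kernel = zone_intersects(kernel_zone, start_pfn, nr_pages);
        bool in_movable = zone_intersects(movable_zone, start_pfn, nr_pages);

        /* Exactly one of the two zones already covers the range: reuse it. */
        if (in_kernel ^ in_movable)
                return (in_kernel) ? kernel_zone : movable_zone;

        /*
         * No zone (or both zones) cover the range: prefer ZONE_MOVABLE only
         * when movable_node is enabled, otherwise default to a kernel zone.
         */
        return movable_node_enabled ? movable_zone : kernel_zone;
}
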
1070 struct zone *zone_for_pfn_range(int online_type, int nid, in zone_for_pfn_range()
1093 struct zone *zone = page_zone(page); in adjust_present_page_count() local
1094 const bool movable = zone_idx(zone) == ZONE_MOVABLE; in adjust_present_page_count()
1101 zone->present_early_pages += nr_pages; in adjust_present_page_count()
1102 zone->present_pages += nr_pages; in adjust_present_page_count()
1103 zone->zone_pgdat->node_present_pages += nr_pages; in adjust_present_page_count()
1112 struct zone *zone, bool mhp_off_inaccessible) in mhp_init_memmap_on_memory() argument
1130 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE); in mhp_init_memmap_on_memory()
1174 struct zone *zone, struct memory_group *group) in online_pages() argument
1178 const int nid = zone_to_nid(zone); in online_pages()
1194 /* associate pfn range with the zone */ in online_pages()
1195 move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE); in online_pages()
1199 node_states_check_changes_online(nr_pages, zone, &arg); in online_pages()
1210 spin_lock_irqsave(&zone->lock, flags); in online_pages()
1211 zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages; in online_pages()
1212 spin_unlock_irqrestore(&zone->lock, flags); in online_pages()
1215 * If this zone is not populated, then it is not in zonelist. in online_pages()
1216 * This means the page allocator ignores this zone. in online_pages()
1219 if (!populated_zone(zone)) { in online_pages()
1221 setup_zone_pageset(zone); in online_pages()
1237 * zone to make sure the just onlined pages are properly distributed in online_pages()
1240 shuffle_zone(zone); in online_pages()
1258 remove_pfn_range_from_zone(zone, pfn, nr_pages); in online_pages()
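
The comments at 1215-1221 and 1237-1240 cover the case where the zone was empty before onlining: it is not on any zonelist yet, so the allocator would ignore it. A minimal sketch of that step inside online_pages(), assuming the usual mainline structure; the need_zonelists_rebuild flag and the build_all_zonelists() call are not among the hits and are filled in from context:

        /* Sketch of a step inside online_pages(); error handling omitted. */
        if (!populated_zone(zone)) {
                need_zonelists_rebuild = 1;     /* zone is not on any zonelist yet */
                setup_zone_pageset(zone);       /* allocate its per-cpu pagesets */
        }

        online_pages_range(pfn, nr_pages);
        adjust_present_page_count(pfn_to_page(pfn), group, nr_pages);

        if (need_zonelists_rebuild)
                build_all_zonelists(NULL);
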
1279 * The node we allocated has no zone fallback lists. For avoiding in hotadd_init_pgdat()
1853 * We have checked that the migration range is on a single zone so in do_migrate_range()
1891 struct zone *zone, struct memory_notify *arg) in node_states_check_changes_offline() argument
1893 struct pglist_data *pgdat = zone->zone_pgdat; in node_states_check_changes_offline()
1910 if (zone_idx(zone) <= ZONE_NORMAL && nr_pages >= present_pages) in node_states_check_changes_offline()
1911 arg->status_change_nid_normal = zone_to_nid(zone); in node_states_check_changes_offline()
1925 arg->status_change_nid = zone_to_nid(zone); in node_states_check_changes_offline()
1950 struct zone *zone, struct memory_group *group) in offline_pages() argument
1954 const int node = zone_to_nid(zone); in offline_pages()
1988 * We only support offlining of memory blocks managed by a single zone, in offline_pages()
1992 if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone || in offline_pages()
1993 page_zone(pfn_to_page(end_pfn - 1)) != zone)) { in offline_pages()
2003 zone_pcp_disable(zone); in offline_pages()
2017 node_states_check_changes_offline(nr_pages, zone, &arg); in offline_pages()
2081 spin_lock_irqsave(&zone->lock, flags); in offline_pages()
2082 zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages; in offline_pages()
2083 spin_unlock_irqrestore(&zone->lock, flags); in offline_pages()
2086 zone_pcp_enable(zone); in offline_pages()
2096 * Make sure to mark the node as memory-less before rebuilding the zone in offline_pages()
2100 if (!populated_zone(zone)) { in offline_pages()
2101 zone_pcp_reset(zone); in offline_pages()
2113 remove_pfn_range_from_zone(zone, start_pfn, nr_pages); in offline_pages()
2122 zone_pcp_enable(zone); in offline_pages()
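
The comment at 2096 refers to the ordering in the teardown path once the zone is empty: the node must be marked memory-less before the zonelists are rebuilt. A sketch of that tail, assuming the mainline sequence; node_states_clear_node() and build_all_zonelists() do not appear among the hits:

        /* Sketch of the offline_pages() tail once all pages are offline. */
        node_states_clear_node(node, &arg);     /* node may now be memory-less */
        if (!populated_zone(zone)) {
                zone_pcp_reset(zone);
                build_all_zonelists(NULL);      /* drop the node from fallback lists */
        }
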
2346 * Sense the online_type via the zone of the memory block. Offlining in try_offline_memory_block()