/linux/mm/

mmzone.c
      34  if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)  in next_zone()
      39  zone = pgdat->node_zones;  in next_zone()

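The two next_zone() hits above already show the core idea: node_zones[] is a fixed-size array embedded in each node's pglist_data, so visiting a node's zones is plain index or pointer arithmetic bounded by MAX_NR_ZONES. Below is a minimal sketch of that walk, not kernel code as-is: the helper name report_populated_zones() is invented, while populated_zone(), pr_info() and the pgdat/zone fields it touches are the real interfaces.

#include <linux/mmzone.h>
#include <linux/printk.h>

/* Walk every zone slot of one node and report only the populated ones. */
static void report_populated_zones(pg_data_t *pgdat)
{
        int i;

        for (i = 0; i < MAX_NR_ZONES; i++) {
                struct zone *zone = pgdat->node_zones + i;

                if (populated_zone(zone))
                        pr_info("node %d: zone %s is populated\n",
                                pgdat->node_id, zone->name);
        }
}
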
shuffle.c
     157  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in __shuffle_free_memory()

memory_hotplug.c
     524  for (zone = pgdat->node_zones;  in update_pgdat_span()
     525  zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {  in update_pgdat_span()
     861  zone = pgdat->node_zones + i;  in auto_movable_stats_account_group()
     905  struct zone *zone = &pgdat->node_zones[zid];  in auto_movable_can_online_movable()
     911  return &pgdat->node_zones[ZONE_NORMAL];  in auto_movable_can_online_movable()
    1020  return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];  in auto_movable_zone_for_pfn()
    1030  struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];  in auto_movable_zone_for_pfn()
    1057  return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];  in default_zone_for_pfn()

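Several of the memory_hotplug.c hits index node_zones[] directly with a zone constant (ZONE_NORMAL, ZONE_MOVABLE) instead of iterating. A hedged one-liner showing that lookup; the helper name zone_of_node() is made up, while NODE_DATA() and enum zone_type are the real kernel interfaces.

#include <linux/mmzone.h>

/* Fetch one specific zone of a NUMA node by zone index, mirroring the
 * &pgdat->node_zones[zid] lookups listed above. */
static struct zone *zone_of_node(int nid, enum zone_type zid)
{
        return &NODE_DATA(nid)->node_zones[zid];
}
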
mm_init.c
      80  zone = &pgdat->node_zones[zoneid];  in mminit_verify_zonelist()
     680  struct zone *zone = &pgdat->node_zones[zid];  in __init_page_from_nid()
     979  struct zone *zone = node->node_zones + j;  in memmap_init()
    1302  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) {  in reset_memoryless_node_totalpages()
    1348  struct zone *zone = pgdat->node_zones + i;  in calculate_node_totalpages()
    1589  struct zone *zone = pgdat->node_zones + z;  in free_area_init_core_hotplug()
    1606  struct zone *zone = pgdat->node_zones + j;  in free_area_init_core()
    1758  struct zone *zone = &pgdat->node_zones[zone_type];  in check_for_memory()
    2200  zone = pgdat->node_zones  in deferred_init_memmap()
    [all ...]

page_owner.c
     833  struct zone *node_zones = pgdat->node_zones;  in init_zones_in_node()  (local variable)
     835  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {  in init_zones_in_node()

vmstat.c
     329  zone = &pgdat->node_zones[i];  in set_pgdat_percpu_threshold()
     992  struct zone *zones = NODE_DATA(node)->node_zones;  in sum_zone_node_page_state()
    1006  struct zone *zones = NODE_DATA(node)->node_zones;  in sum_zone_numa_event_state()
    1538  struct zone *node_zones = pgdat->node_zones;  in walk_zones_in_node()  (local variable)
    1541  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {  in walk_zones_in_node()
    1750  struct zone *compare = &pgdat->node_zones[zid];  in is_zone_first_populated()

show_mem.c
      98  struct zone *zone = &pgdat->node_zones[zone_type];  in si_meminfo_node()
     166  if (zone_managed_pages(pgdat->node_zones + zone_idx))  in node_has_managed_zones()

memremap.c
     228  zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];  in pagemap_range()
     242  memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],  in pagemap_range()

compaction.c
     407  struct zone *zone = &pgdat->node_zones[zoneid];  in __reset_isolation_suitable()
    2205  zone = &pgdat->node_zones[zoneid];  in fragmentation_score_zone()
    2907  zone = &pgdat->node_zones[zoneid];  in try_to_compact_pages()
    3039  zone = &pgdat->node_zones[zoneid];  in compact_store()
    3081  zone = &pgdat->node_zones[zoneid];  in kcompactd_node_suitable()

migrate.c
    2609  struct zone *zone = pgdat->node_zones + z;  in migrate_misplaced_folio_prepare()
    2680  if (managed_zone(pgdat->node_zones + z))
    2691  wakeup_kswapd(pgdat->node_zones + z, 0,

page_alloc.c
    3691  * the pointer is within zone->zone_pgdat->node_zones[]. Also assume  in get_page_from_freelist()
    5443  zone = pgdat->node_zones + zone_type;  in local_memory_node()
    6234  struct zone *zone = pgdat->node_zones + i;
    6272  struct zone *zone = &pgdat->node_zones[i];  in postcore_initcall()
    6278  struct zone *upper_zone = &pgdat->node_zones[j];  in watermark_scale_factor_sysctl_handler()
    7321  struct zone *zone = &pgdat->node_zones[ZONE_DMA];

page-writeback.c
     258  struct zone *zone = pgdat->node_zones + z;  in node_dirtyable_memory()
     294  z = &NODE_DATA(node)->node_zones[i];  in highmem_dirtyable_memory()

memblock.c
    2382  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in reset_node_managed_pages()

vmscan.c
     285  for ((idx) = 0, (zone) = (pgdat)->node_zones; \
    4883  struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;  in should_abort_scan()
    6784  zone = pgdat->node_zones + i;  in pgdat_watermark_boosted()
    7052  zone = pgdat->node_zones + i;  in balance_pgdat()
    7184  zone = pgdat->node_zones + i;  in balance_pgdat()

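The vmscan.c hit at line 285 is the opening clause of an iterator macro that advances a zone index and a zone pointer in lockstep over node_zones[]. A sketch of what such a macro can look like, assuming the loop simply spans all MAX_NR_ZONES slots; the name for_each_zone_of_node is invented here, and callers would still have to skip unpopulated zones themselves.

/* Iterate idx and zone together over every slot of pgdat->node_zones[]. */
#define for_each_zone_of_node(idx, zone, pgdat)                         \
        for ((idx) = 0, (zone) = (pgdat)->node_zones;                   \
             (idx) < MAX_NR_ZONES;                                      \
             (idx)++, (zone)++)
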
/linux/tools/testing/vma/linux/

mmzone.h
      34  struct zone node_zones[MAX_NR_ZONES];  (member declaration)

/linux/tools/testing/memblock/linux/

mmzone.h
      34  struct zone node_zones[MAX_NR_ZONES];  (member declaration)

/linux/tools/perf/util/bpf_skel/vmlinux/

vmlinux.h
     211  struct zone node_zones[6];  /* value for all possible config */  (member declaration)

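Both testing stubs and the perf vmlinux.h fallback above declare node_zones as a fixed array embedded in the per-node structure. A heavily stripped-down sketch of that layout follows; the field selection matches what the vmcoreinfo.rst entry further down exports, the placeholder struct zone body and the MAX_NR_ZONES value of 6 (taken from the vmlinux.h comment) are simplifications, and everything else in the real pglist_data is omitted.

#define MAX_NR_ZONES 6                  /* "value for all possible config", per vmlinux.h above */

struct page;                            /* opaque here */

struct zone {
        unsigned long present_pages;    /* stand-in; the real struct zone has many fields */
};

typedef struct pglist_data {
        struct zone node_zones[MAX_NR_ZONES];   /* one embedded zone per possible type */
        int nr_zones;                           /* populated zones in node_zones[] */
        struct page *node_mem_map;              /* flat memmap, when configured */
        unsigned long node_start_pfn;
        unsigned long node_spanned_pages;
        int node_id;
} pg_data_t;
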
/linux/drivers/acpi/

acpi_mrrm.c
      43  struct zone *zone = NODE_DATA(nid)->node_zones + z;  in get_node_num()

/linux/kernel/

vmcore_info.c
     179  VMCOREINFO_OFFSET(pglist_data, node_zones);  in crash_save_vmcoreinfo_init()

/linux/include/linux/

mm_inline.h
      48  __mod_zone_page_state(&pgdat->node_zones[zid],  in __update_lru_size()

mm.h
    1737  return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];  in page_zone()

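page_zone() above is the canonical reverse mapping: take the page's NUMA node id and zone number, then index that node's node_zones[]. A small usage sketch; managed_pages_of() is a made-up name, while page_zone() and zone_managed_pages() are the real helpers (the latter also shows up in the show_mem.c and amdgpu_acpi.c hits).

#include <linux/mm.h>
#include <linux/mmzone.h>

/* From any page, reach its zone via node_zones[] and read how many pages
 * of that zone are managed by the buddy allocator. */
static unsigned long managed_pages_of(struct page *page)
{
        struct zone *zone = page_zone(page);

        return zone_managed_pages(zone);
}
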
/linux/tools/perf/util/bpf_skel/

lock_contention.bpf.c
     869  zone_off = offsetof(struct pglist_data, node_zones);  in collect_zone_lock()

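collect_zone_lock() above records offsetof(struct pglist_data, node_zones) so that a lock address can later be related to a node's base pointer. A sketch of the same offsetof() arithmetic, assuming the struct pglist_data definition from the generated vmlinux.h is in scope; the helper name node_zones_base() and its plain pointer math are illustrative, not the BPF program's actual logic.

#include <stddef.h>     /* offsetof() */

/* Given the address of a node's pglist_data, compute where its
 * node_zones[] array begins. */
static unsigned long node_zones_base(unsigned long pgdat_addr)
{
        return pgdat_addr + offsetof(struct pglist_data, node_zones);
}
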
/linux/drivers/gpu/drm/amd/amdgpu/

amdgpu_acpi.c
     873  zone_managed_pages(&pgdat->node_zones[zone_type]);  in amdgpu_acpi_get_numa_size()

/linux/Documentation/admin-guide/kdump/

vmcoreinfo.rst
     150  (pglist_data, node_zones|nr_zones|node_mem_map|node_start_pfn|node_spanned_pages|node_id)

/linux/kernel/sched/

fair.c
    1818  struct zone *zone = pgdat->node_zones + z;  in pgdat_free_space_enough()

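pgdat_free_space_enough() sits in the NUMA-balancing path and asks whether a node still has enough free memory. A sketch in the same spirit, summing NR_FREE_PAGES over every populated zone of a node; the helper name and the caller-supplied threshold are invented, while populated_zone() and zone_page_state() are the real accessors.

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Does this node have at least 'needed' free pages across all its zones? */
static bool node_free_pages_at_least(pg_data_t *pgdat, unsigned long needed)
{
        unsigned long free_pages = 0;
        int z;

        for (z = 0; z < MAX_NR_ZONES; z++) {
                struct zone *zone = pgdat->node_zones + z;

                if (!populated_zone(zone))
                        continue;
                free_pages += zone_page_state(zone, NR_FREE_PAGES);
        }
        return free_pages >= needed;
}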