Searched refs: node_zones (Results 1 – 25 of 25), sorted by relevance
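
Note: as the mmzone.h stubs under tools/testing/ and the vmlinux.h hit below show, node_zones is the per-node array of struct zone embedded in struct pglist_data, with one slot per zone type (MAX_NR_ZONES in total). Most call sites listed here either index the array by zone id (e.g. &pgdat->node_zones[ZONE_MOVABLE]) or walk it from start to end. The following is a minimal, stand-alone sketch of that walk; the structures are simplified stand-ins for illustration only, not the kernel's real definitions.

    /*
     * Stand-alone illustration of the iteration idiom seen in shuffle.c,
     * memblock.c and vmscan.c below.  The structs are simplified
     * placeholders, not the real kernel definitions.
     */
    #include <stdio.h>

    #define MAX_NR_ZONES 6          /* assumed value; cf. the vmlinux.h hit below */

    struct zone {
            unsigned long managed_pages;    /* placeholder field */
    };

    struct pglist_data {
            struct zone node_zones[MAX_NR_ZONES];   /* per-node zone array */
    };

    int main(void)
    {
            struct pglist_data pgdat = { 0 };
            struct zone *z;

            /* Walk every zone of one node, populated or not. */
            for (z = pgdat.node_zones; z < pgdat.node_zones + MAX_NR_ZONES; z++)
                    printf("zone %td: %lu managed pages\n",
                           z - pgdat.node_zones, z->managed_pages);

            return 0;
    }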

/linux/mm/
mmzone.c
  34    if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
  39    zone = pgdat->node_zones; in next_zone()
shuffle.c
  157   for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in __shuffle_free_memory()
memory_hotplug.c
  524   for (zone = pgdat->node_zones; in update_pgdat_span()
  525   zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
  861   zone = pgdat->node_zones + i; in auto_movable_stats_account_group()
  905   struct zone *zone = &pgdat->node_zones[zid]; in auto_movable_can_online_movable()
  911   return &pgdat->node_zones[ZONE_NORMAL]; in auto_movable_can_online_movable()
  1020  return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in auto_movable_zone_for_pfn()
  1030  struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in auto_movable_zone_for_pfn()
  1057  return &NODE_DATA(nid)->node_zones[ZONE_MOVABLE]; in default_zone_for_pfn()
mm_init.c
  80    zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
  680   struct zone *zone = &pgdat->node_zones[zid]; in __init_page_from_nid()
  979   struct zone *zone = node->node_zones + j; in memmap_init()
  1302  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { in reset_memoryless_node_totalpages()
  1348  struct zone *zone = pgdat->node_zones + i; in calculate_node_totalpages()
  1589  struct zone *zone = pgdat->node_zones + z; in free_area_init_core_hotplug()
  1606  struct zone *zone = pgdat->node_zones + j; in free_area_init_core()
  1758  struct zone *zone = &pgdat->node_zones[zone_type]; in check_for_memory()
  2200  zone = pgdat->node_zones in deferred_init_memmap()
  [all...]
page_owner.c
  833   struct zone *node_zones = pgdat->node_zones; in init_zones_in_node() (local)
  835   for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in init_zones_in_node()
vmstat.c
  329   zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
  992   struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_node_page_state()
  1006  struct zone *zones = NODE_DATA(node)->node_zones; in sum_zone_numa_event_state()
  1538  struct zone *node_zones = pgdat->node_zones; in walk_zones_in_node() (local)
  1541  for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) { in walk_zones_in_node()
  1750  struct zone *compare = &pgdat->node_zones[zid]; in is_zone_first_populated()
show_mem.c
  98    struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node()
  166   if (zone_managed_pages(pgdat->node_zones + zone_idx)) in node_has_managed_zones()
memremap.c
  228   zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE]; in pagemap_range()
  242   memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE], in pagemap_range()
compaction.c
  407   struct zone *zone = &pgdat->node_zones[zoneid]; in __reset_isolation_suitable()
  2205  zone = &pgdat->node_zones[zoneid]; in fragmentation_score_zone()
  2907  zone = &pgdat->node_zones[zoneid]; in try_to_compact_pages()
  3039  zone = &pgdat->node_zones[zoneid]; in compact_store()
  3081  zone = &pgdat->node_zones[zoneid]; in kcompactd_node_suitable()
migrate.c
  2609  struct zone *zone = pgdat->node_zones + z; in migrate_misplaced_folio_prepare()
  2680  if (managed_zone(pgdat->node_zones + z))
  2691  wakeup_kswapd(pgdat->node_zones + z, 0,
page_alloc.c
  3691  * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in get_page_from_freelist()
  5443  zone = pgdat->node_zones + zone_type; in local_memory_node()
  6234  struct zone *zone = pgdat->node_zones + i;
  6272  struct zone *zone = &pgdat->node_zones[i]; in postcore_initcall()
  6278  struct zone *upper_zone = &pgdat->node_zones[j]; in watermark_scale_factor_sysctl_handler()
  7321  struct zone *zone = &pgdat->node_zones[ZONE_DMA];
page-writeback.c
  258   struct zone *zone = pgdat->node_zones + z; in node_dirtyable_memory()
  294   z = &NODE_DATA(node)->node_zones[i]; in highmem_dirtyable_memory()
memblock.c
  2382  for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in reset_node_managed_pages()
vmscan.c
  285   for ((idx) = 0, (zone) = (pgdat)->node_zones; \
  4883  struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
  6784  zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
  7052  zone = pgdat->node_zones + i; in balance_pgdat()
  7184  zone = pgdat->node_zones + i; in balance_pgdat()
/linux/tools/testing/vma/linux/
mmzone.h
  34    struct zone node_zones[MAX_NR_ZONES]; (member)
/linux/tools/testing/memblock/linux/
mmzone.h
  34    struct zone node_zones[MAX_NR_ZONES]; (member)
/linux/tools/perf/util/bpf_skel/vmlinux/
vmlinux.h
  211   struct zone node_zones[6]; /* value for all possible config */ (member)
/linux/drivers/acpi/
acpi_mrrm.c
  43    struct zone *zone = NODE_DATA(nid)->node_zones + z; in get_node_num()
/linux/kernel/
vmcore_info.c
  179   VMCOREINFO_OFFSET(pglist_data, node_zones); in crash_save_vmcoreinfo_init()
/linux/include/linux/
mm_inline.h
  48    __mod_zone_page_state(&pgdat->node_zones[zid], in __update_lru_size()
mm.h
  1737  return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
/linux/tools/perf/util/bpf_skel/
lock_contention.bpf.c
  869   zone_off = offsetof(struct pglist_data, node_zones); in collect_zone_lock()
/linux/drivers/gpu/drm/amd/amdgpu/
amdgpu_acpi.c
  873   zone_managed_pages(&pgdat->node_zones[zone_type]); in amdgpu_acpi_get_numa_size()
/linux/Documentation/admin-guide/kdump/
vmcoreinfo.rst
  150   (pglist_data, node_zones|nr_zones|node_mem_map|node_start_pfn|node_spanned_pages|node_id)
/linux/kernel/sched/
fair.c
  1818  struct zone *zone = pgdat->node_zones + z; in pgdat_free_space_enough()