Searched refs:pgdat (Results 1 – 25 of 37) sorted by relevance

/linux/mm/
mm_init.c
68 pg_data_t *pgdat = NODE_DATA(nid); in mminit_verify_zonelist() local
79 zonelist = &pgdat->node_zonelists[listid]; in mminit_verify_zonelist()
80 zone = &pgdat->node_zones[zoneid]; in mminit_verify_zonelist()
674 pg_data_t *pgdat; in __init_page_from_nid() local
677 pgdat = NODE_DATA(nid); in __init_page_from_nid()
680 struct zone *zone = &pgdat->node_zones[zid]; in __init_page_from_nid()
693 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) in pgdat_set_deferred_range() argument
695 pgdat->first_deferred_pfn = ULONG_MAX; in pgdat_set_deferred_range()
756 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} in pgdat_set_deferred_range() argument
1117 struct pglist_data *pgdat = zone->zone_pgdat; in memmap_init_zone_device() local
[all …]
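
Note: the mm_init.c hits all reduce to one access pattern: resolve a node id to its pg_data_t with NODE_DATA() and index the node_zones array by zone id. A minimal kernel-style sketch of that pattern (walk_node_zones is an invented name, not a function in the file):

    #include <linux/mmzone.h>
    #include <linux/printk.h>

    /* Illustrative only: visit each populated zone of one node, the way
     * mminit_verify_zonelist() and __init_page_from_nid() do above. */
    static void walk_node_zones(int nid)
    {
        pg_data_t *pgdat = NODE_DATA(nid);
        int zid;

        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
            struct zone *zone = &pgdat->node_zones[zid];

            if (!populated_zone(zone))
                continue;   /* no present pages in this zone */
            pr_info("node %d: zone %s spans %lu pages\n",
                nid, zone->name, zone->spanned_pages);
        }
    }
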
show_mem.c
95 pg_data_t *pgdat = NODE_DATA(nid); in si_meminfo_node() local
98 struct zone *zone = &pgdat->node_zones[zone_type]; in si_meminfo_node()
107 val->sharedram = node_page_state(pgdat, NR_SHMEM); in si_meminfo_node()
162 static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) in node_has_managed_zones() argument
166 if (zone_managed_pages(pgdat->node_zones + zone_idx)) in node_has_managed_zones()
185 pg_data_t *pgdat; in show_free_areas() local
226 for_each_online_pgdat(pgdat) { in show_free_areas()
227 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) in show_free_areas()
229 if (!node_has_managed_zones(pgdat, max_zone_idx)) in show_free_areas()
258 pgdat->node_id, in show_free_areas()
[all …]
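
Note: node_has_managed_zones() is short enough to reconstruct from the fragment above: a node is only worth reporting if at least one of its zones has pages managed by the buddy allocator. A hedged reconstruction, not a verbatim copy:

    #include <linux/mmzone.h>

    /* A node matters for the free-areas report only if some zone up to
     * max_zone_idx has buddy-managed pages. */
    static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx)
    {
        int zone_idx;

        for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++)
            if (zone_managed_pages(pgdat->node_zones + zone_idx))
                return true;
        return false;
    }
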
vmscan.c
284 #define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \ argument
285 for ((idx) = 0, (zone) = (pgdat)->node_zones; \
501 static bool skip_throttle_noprogress(pg_data_t *pgdat) in skip_throttle_noprogress() argument
510 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
518 for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) { in skip_throttle_noprogress()
529 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) in reclaim_throttle() argument
531 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
560 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
561 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
562 node_page_state(pgdat, NR_THROTTLED_WRITTEN)); in reclaim_throttle()
[all …]
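
Note: the vmscan.c hits show the per-node throttling state: one wait queue per vmscan_throttle_state reason plus atomic bookkeeping. Worth spelling out is the escape hatch in skip_throttle_noprogress(): once kswapd has failed too often on a node, throttling callers any further is pointless. A hedged sketch (helper name invented):

    #include <linux/mmzone.h>

    /* MAX_RECLAIM_RETRIES lives in mm/internal.h; the check mirrors the
     * skip_throttle_noprogress() fragment above. */
    static bool reclaim_looks_hopeless(pg_data_t *pgdat)
    {
        return atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES;
    }
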
vmstat.c
277 struct pglist_data *pgdat; in refresh_zone_stat_thresholds() local
283 for_each_online_pgdat(pgdat) { in refresh_zone_stat_thresholds()
285 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0; in refresh_zone_stat_thresholds()
290 struct pglist_data *pgdat = zone->zone_pgdat; in refresh_zone_stat_thresholds() local
302 pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold; in refresh_zone_stat_thresholds()
303 per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold in refresh_zone_stat_thresholds()
320 void set_pgdat_percpu_threshold(pg_data_t *pgdat, in set_pgdat_percpu_threshold() argument
328 for (i = 0; i < pgdat->nr_zones; i++) { in set_pgdat_percpu_threshold()
329 zone = &pgdat->node_zones[i]; in set_pgdat_percpu_threshold()
376 void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, in __mod_node_page_state() argument
[all …]
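
Note: what these vmstat.c hits orbit around: node counters are too hot to update atomically on every page operation, so each CPU accumulates a local delta and only folds it into pgdat->vm_stat once it exceeds a per-CPU stat_threshold. A self-contained userspace toy of that batching (all names invented; the kernel uses per-CPU variables and atomics instead of plain arrays):

    #include <stdio.h>

    /* Toy model of vmstat's per-CPU batching: each CPU keeps a small
     * local delta and folds it into the shared counter only when it
     * crosses stat_threshold, cutting cache-line traffic. */
    struct toy_node_stat {
        long global;        /* node-wide counter (atomic in the kernel) */
        long cpu_delta[4];  /* per-CPU deltas (per-CPU vars in the kernel) */
        int stat_threshold;
    };

    static void toy_mod_node_state(struct toy_node_stat *s, int cpu, long delta)
    {
        long x = s->cpu_delta[cpu] + delta;

        if (x > s->stat_threshold || x < -s->stat_threshold) {
            s->global += x;   /* fold the batched delta */
            x = 0;
        }
        s->cpu_delta[cpu] = x;
    }

    int main(void)
    {
        struct toy_node_stat s = { .stat_threshold = 32 };
        long pending = 0;

        for (int i = 0; i < 200; i++)
            toy_mod_node_state(&s, i % 4, 1);
        for (int cpu = 0; cpu < 4; cpu++)
            pending += s.cpu_delta[cpu];
        printf("global=%ld pending=%ld total=%ld\n",
               s.global, pending, s.global + pending);
        return 0;
    }
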
compaction.c
402 void reset_isolation_suitable(pg_data_t *pgdat) in reset_isolation_suitable() argument
407 struct zone *zone = &pgdat->node_zones[zoneid]; in reset_isolation_suitable()
763 pg_data_t *pgdat = cc->zone->zone_pgdat; in too_many_isolated() local
768 inactive = node_page_state(pgdat, NR_INACTIVE_FILE) + in too_many_isolated()
769 node_page_state(pgdat, NR_INACTIVE_ANON); in too_many_isolated()
770 active = node_page_state(pgdat, NR_ACTIVE_FILE) + in too_many_isolated()
771 node_page_state(pgdat, NR_ACTIVE_ANON); in too_many_isolated()
772 isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + in too_many_isolated()
773 node_page_state(pgdat, NR_ISOLATED_ANON); in too_many_isolated()
788 wake_throttle_isolated(pgdat); in too_many_isolated()
[all …]
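
Note: the arithmetic behind the too_many_isolated() fragment is a simple ratio test: compaction backs off once more pages sit isolated off this node's LRU lists than half of what remains on them. A hedged restatement (the kernel wraps this in additional checks omitted here):

    static bool too_many_isolated_sketch(unsigned long active,
                                         unsigned long inactive,
                                         unsigned long isolated)
    {
        return isolated > (inactive + active) / 2;
    }
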
mmzone.c
18 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) in next_online_pgdat() argument
20 int nid = next_online_node(pgdat->node_id); in next_online_pgdat()
32 pg_data_t *pgdat = zone->zone_pgdat; in next_zone() local
34 if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) in next_zone()
37 pgdat = next_online_pgdat(pgdat); in next_zone()
38 if (pgdat) in next_zone()
39 zone = pgdat->node_zones; in next_zone()
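
Note: next_online_pgdat() and next_zone() are the building blocks behind the for_each_online_pgdat() and for_each_zone() iteration macros declared in include/linux/mmzone.h. A hedged usage sketch (count_spanned_pages is invented):

    #include <linux/mmzone.h>

    /* for_each_zone() starts at first_online_pgdat()->node_zones and
     * advances with the next_zone() shown above, hopping to the next
     * online pgdat once one node's zones are exhausted. */
    static unsigned long count_spanned_pages(void)
    {
        struct zone *zone;
        unsigned long total = 0;

        for_each_zone(zone)
            total += zone->spanned_pages;
        return total;
    }
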
workingset.c
199 static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, in pack_shadow() argument
204 eviction = (eviction << NODES_SHIFT) | pgdat->node_id; in pack_shadow()
210 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, in unpack_shadow() argument
225 *pgdat = NODE_DATA(nid); in unpack_shadow()
245 struct pglist_data *pgdat = folio_pgdat(folio); in lru_gen_eviction() local
249 lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_eviction()
257 return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset); in lru_gen_eviction()
270 struct pglist_data *pgdat; in lru_gen_test_recent() local
272 unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset); in lru_gen_test_recent()
275 *lruvec = mem_cgroup_lruvec(memcg, pgdat); in lru_gen_test_recent()
[all …]
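
Note: pack_shadow()/unpack_shadow() above multiplex the memcg id, node id, LRU "eviction" counter and a workingset bit into one tagged xarray entry. A self-contained userspace model of the bit-packing (field widths are invented for the demo, not the kernel's MEM_CGROUP_ID_SHIFT/NODES_SHIFT; the kernel also reserves tag bits, omitted here; assumes 64-bit unsigned long):

    #include <assert.h>
    #include <stdio.h>

    /* Toy layout, low to high: workingset | nid | memcgid | eviction */
    #define TOY_NODES_SHIFT  6
    #define TOY_MEMCG_SHIFT 16

    static unsigned long pack_shadow(int memcgid, int nid,
                                     unsigned long eviction, int workingset)
    {
        eviction = (eviction << TOY_MEMCG_SHIFT) | memcgid;
        eviction = (eviction << TOY_NODES_SHIFT) | nid;
        eviction = (eviction << 1) | !!workingset;
        return eviction;
    }

    static void unpack_shadow(unsigned long shadow, int *memcgid, int *nid,
                              unsigned long *eviction, int *workingset)
    {
        *workingset = shadow & 1;
        shadow >>= 1;
        *nid = shadow & ((1 << TOY_NODES_SHIFT) - 1);
        shadow >>= TOY_NODES_SHIFT;
        *memcgid = shadow & ((1 << TOY_MEMCG_SHIFT) - 1);
        *eviction = shadow >> TOY_MEMCG_SHIFT;
    }

    int main(void)
    {
        int memcgid, nid, ws;
        unsigned long ev;

        unpack_shadow(pack_shadow(42, 3, 12345, 1),
                      &memcgid, &nid, &ev, &ws);
        assert(memcgid == 42 && nid == 3 && ev == 12345 && ws == 1);
        printf("round-trip ok\n");
        return 0;
    }
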
memory-tiers.c
262 pg_data_t *pgdat; in __node_get_memory_tier() local
264 pgdat = NODE_DATA(node); in __node_get_memory_tier()
265 if (!pgdat) in __node_get_memory_tier()
272 return rcu_dereference_check(pgdat->memtier, in __node_get_memory_tier()
280 pg_data_t *pgdat; in node_is_toptier() local
283 pgdat = NODE_DATA(node); in node_is_toptier()
284 if (!pgdat) in node_is_toptier()
288 memtier = rcu_dereference(pgdat->memtier); in node_is_toptier()
302 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets) in node_get_allowed_targets() argument
312 memtier = rcu_dereference(pgdat->memtier); in node_get_allowed_targets()
[all …]
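
Note: all three memory-tiers.c hits follow the same RCU read-side discipline: pgdat->memtier can be replaced while tiers are rebuilt, so it must be sampled via rcu_dereference() inside a read-side critical section (or, as in __node_get_memory_tier(), with rcu_dereference_check() under the tiering lock). A hedged sketch of the plain read side (node_has_memtier is invented; assumes a config where the memtier field exists):

    #include <linux/mmzone.h>
    #include <linux/rcupdate.h>

    /* pgdat->memtier may be swapped concurrently; sample under RCU. */
    static bool node_has_memtier(int node)
    {
        pg_data_t *pgdat = NODE_DATA(node);
        bool ret;

        if (!pgdat)
            return false;

        rcu_read_lock();
        ret = rcu_dereference(pgdat->memtier) != NULL;
        rcu_read_unlock();
        return ret;
    }
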
shuffle.h
11 extern void __shuffle_free_memory(pg_data_t *pgdat);
13 static inline void __meminit shuffle_free_memory(pg_data_t *pgdat) in shuffle_free_memory() argument
17 __shuffle_free_memory(pgdat); in shuffle_free_memory()
40 static inline void shuffle_free_memory(pg_data_t *pgdat) in shuffle_free_memory() argument
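
Note: shuffle.h is a textbook example of the config-stub idiom these hits trace: when the feature is built in, the inline wrapper forwards to __shuffle_free_memory(); when it is not, an empty inline with the same signature keeps callers free of #ifdefs. A simplified sketch (the real wrapper also consults a static key before forwarding):

    #ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
    extern void __shuffle_free_memory(pg_data_t *pgdat);
    static inline void shuffle_free_memory(pg_data_t *pgdat)
    {
        __shuffle_free_memory(pgdat);   /* real implementation */
    }
    #else
    static inline void shuffle_free_memory(pg_data_t *pgdat)
    {
        /* feature compiled out: calls vanish entirely */
    }
    #endif
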
memory_hotplug.c
519 static void update_pgdat_span(struct pglist_data *pgdat) in update_pgdat_span() argument
524 for (zone = pgdat->node_zones; in update_pgdat_span()
525 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) { in update_pgdat_span()
543 pgdat->node_start_pfn = node_start_pfn; in update_pgdat_span()
544 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn; in update_pgdat_span()
552 struct pglist_data *pgdat = zone->zone_pgdat; in remove_pfn_range_from_zone() local
577 update_pgdat_span(pgdat); in remove_pfn_range_from_zone()
714 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn, in resize_pgdat_range() argument
717 unsigned long old_end_pfn = pgdat_end_pfn(pgdat); in resize_pgdat_range()
719 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn) in resize_pgdat_range()
[all …]
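
Note: resize_pgdat_range() widens a node's pfn span to cover hot-added memory: the start can only move down, the end only up. A self-contained userspace model of that arithmetic (struct and names invented):

    #include <stdio.h>

    /* Toy version of resize_pgdat_range(): widen [start, end) to cover
     * a hot-added pfn range; an empty node adopts the range outright. */
    struct toy_span { unsigned long start_pfn, nr_pages; };

    static void resize_span(struct toy_span *s, unsigned long start_pfn,
                            unsigned long nr_pages)
    {
        unsigned long old_end = s->start_pfn + s->nr_pages;
        unsigned long new_end = start_pfn + nr_pages;

        if (!s->nr_pages || start_pfn < s->start_pfn)
            s->start_pfn = start_pfn;
        s->nr_pages = (new_end > old_end ? new_end : old_end) - s->start_pfn;
    }

    int main(void)
    {
        struct toy_span node = { 0, 0 };

        resize_span(&node, 4096, 1024);   /* first hot-add */
        resize_span(&node, 2048, 512);    /* grows downward */
        printf("span: [%lu, %lu)\n", node.start_pfn,
               node.start_pfn + node.nr_pages);
        return 0;
    }
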
shuffle.c
153 void __meminit __shuffle_free_memory(pg_data_t *pgdat) in __shuffle_free_memory() argument
157 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) in __shuffle_free_memory()
numa.c
37 pg_data_t *pgdat; in alloc_offline_node_data() local
38 node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES); in alloc_offline_node_data()
page_alloc.c
2909 struct pglist_data *pgdat = zone->zone_pgdat; in free_frozen_page_commit() local
2919 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES && in free_frozen_page_commit()
2920 next_memory_node(pgdat->node_id) < MAX_NUMNODES) in free_frozen_page_commit()
2921 atomic_set(&pgdat->kswapd_failures, 0); in free_frozen_page_commit()
5513 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) in build_zonerefs_node() argument
5521 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
5631 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, in build_zonelists_in_node_order() argument
5637 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; in build_zonelists_in_node_order()
5654 static void build_thisnode_zonelists(pg_data_t *pgdat) in build_thisnode_zonelists() argument
5659 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; in build_thisnode_zonelists()
[all …]
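
Note: build_zonerefs_node() appends one node's populated zones to a zoneref array from the highest zone type downward, which is what makes allocation fall back from, say, ZONE_NORMAL toward ZONE_DMA and never upward. A hedged sketch of the loop the fragment at 5521 belongs to (zoneref_set_zone() is the file-local helper page_alloc.c uses to fill one entry):

    #include <linux/mmzone.h>

    /* Sketch, not verbatim: walk zone types top-down, recording only
     * populated zones; returns the number of entries written. */
    static int build_zonerefs_node_sketch(pg_data_t *pgdat,
                                          struct zoneref *zonerefs)
    {
        enum zone_type zone_type = MAX_NR_ZONES;
        struct zone *zone;
        int nr_zones = 0;

        do {
            zone_type--;
            zone = pgdat->node_zones + zone_type;
            if (populated_zone(zone))
                zoneref_set_zone(zone, &zonerefs[nr_zones++]);
        } while (zone_type);

        return nr_zones;
    }
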
internal.h
409 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
413 pg_data_t *pgdat = folio_pgdat(folio); in acct_reclaim_writeback() local
414 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled); in acct_reclaim_writeback()
417 __acct_reclaim_writeback(pgdat, folio, nr_throttled); in acct_reclaim_writeback()
420 static inline void wake_throttle_isolated(pg_data_t *pgdat) in wake_throttle_isolated() argument
424 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED]; in wake_throttle_isolated()
540 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
543 struct mem_cgroup *memcg, pg_data_t *pgdat);
546 struct mem_cgroup *memcg, pg_data_t *pgdat) in user_proactive_reclaim() argument
1212 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, in node_reclaim() argument
memcontrol.c
784 pg_data_t *pgdat = folio_pgdat(folio); in lruvec_stat_mod_folio() local
792 mod_node_page_state(pgdat, idx, val); in lruvec_stat_mod_folio()
796 lruvec = mem_cgroup_lruvec(memcg, pgdat); in lruvec_stat_mod_folio()
804 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); in mod_lruvec_kmem_state() local
818 mod_node_page_state(pgdat, idx, val); in mod_lruvec_kmem_state()
820 lruvec = mem_cgroup_lruvec(memcg, pgdat); in mod_lruvec_kmem_state()
1024 int nid = reclaim->pgdat->node_id; in mem_cgroup_iter()
2548 struct pglist_data *pgdat, in account_slab_nmi_safe() argument
2554 lruvec = mem_cgroup_lruvec(memcg, pgdat); in account_slab_nmi_safe()
2557 struct mem_cgroup_per_node *pn = memcg->nodeinfo[pgdat->node_id]; in account_slab_nmi_safe()
[all …]
/linux/tools/testing/vma/linux/
mmzone.h
9 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
11 #define for_each_online_pgdat(pgdat) \ argument
12 for (pgdat = first_online_pgdat(); \
13 pgdat; \
14 pgdat = next_online_pgdat(pgdat))
/linux/tools/testing/memblock/linux/
mmzone.h
9 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
11 #define for_each_online_pgdat(pgdat) \ argument
12 for (pgdat = first_online_pgdat(); \
13 pgdat; \
14 pgdat = next_online_pgdat(pgdat))
/linux/drivers/base/
node.c
446 struct pglist_data *pgdat = NODE_DATA(nid); in node_read_meminfo() local
452 sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B); in node_read_meminfo()
453 sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B); in node_read_meminfo()
455 swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE); in node_read_meminfo()
474 nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) + in node_read_meminfo()
475 node_page_state(pgdat, NR_ACTIVE_FILE)), in node_read_meminfo()
476 nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) + in node_read_meminfo()
477 node_page_state(pgdat, NR_INACTIVE_FILE)), in node_read_meminfo()
478 nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)), in node_read_meminfo()
479 nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)), in node_read_meminfo()
[all …]
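
Note: node_read_meminfo() prints everything in KiB; the K() wrapper around each node_page_state() call is the usual page-count-to-KiB shift, which node.c defines locally. Hedged restatement:

    /* Pages to KiB: with PAGE_SHIFT = 12 (4 KiB pages) this is x * 4. */
    #define K(x) ((x) << (PAGE_SHIFT - 10))
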
/linux/include/linux/
vmstat.h
169 static inline void node_page_state_add(long x, struct pglist_data *pgdat, in node_page_state_add() argument
172 atomic_long_add(x, &pgdat->vm_stat[item]); in node_page_state_add()
258 extern unsigned long node_page_state(struct pglist_data *pgdat,
260 extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
304 void set_pgdat_percpu_threshold(pg_data_t *pgdat,
318 static inline void __mod_node_page_state(struct pglist_data *pgdat, in __mod_node_page_state() argument
332 node_page_state_add(delta, pgdat, item); in __mod_node_page_state()
341 static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) in __inc_node_state() argument
343 atomic_long_inc(&pgdat->vm_stat[item]); in __inc_node_state()
353 static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) in __dec_node_state() argument
[all …]
compaction.h
96 extern void reset_isolation_suitable(pg_data_t *pgdat);
108 extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
111 static inline void reset_isolation_suitable(pg_data_t *pgdat) in reset_isolation_suitable() argument
129 static inline void wakeup_kcompactd(pg_data_t *pgdat, in wakeup_kcompactd() argument
memcontrol.h
60 pg_data_t *pgdat; member
707 struct pglist_data *pgdat) in mem_cgroup_lruvec() argument
713 lruvec = &pgdat->__lruvec; in mem_cgroup_lruvec()
720 mz = memcg->nodeinfo[pgdat->node_id]; in mem_cgroup_lruvec()
728 if (unlikely(lruvec->pgdat != pgdat)) in mem_cgroup_lruvec()
729 lruvec->pgdat = pgdat; in mem_cgroup_lruvec()
1167 struct pglist_data *pgdat) in mem_cgroup_lruvec() argument
1169 return &pgdat->__lruvec; in mem_cgroup_lruvec()
1174 struct pglist_data *pgdat = folio_pgdat(folio); in folio_lruvec() local
1175 return &pgdat->__lruvec; in folio_lruvec()
[all …]
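
Note: the two mem_cgroup_lruvec() definitions above are the memcg-on and memcg-off halves of one contract: give me the LRU state for (memcg, node), falling back to the node's own pgdat->__lruvec when there is no cgroup accounting. A hedged caller-side sketch (folio_node_lruvec is invented):

    #include <linux/memcontrol.h>
    #include <linux/mm.h>

    /* Resolve the folio's node first, then the (memcg, node) lruvec;
     * with memcg disabled this degenerates to &pgdat->__lruvec. */
    static struct lruvec *folio_node_lruvec(struct folio *folio,
                                            struct mem_cgroup *memcg)
    {
        return mem_cgroup_lruvec(memcg, folio_pgdat(folio));
    }
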
mmzone.h
617 void lru_gen_init_pgdat(struct pglist_data *pgdat);
630 static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) in lru_gen_init_pgdat() argument
695 struct pglist_data *pgdat; member
1529 static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) in pgdat_end_pfn() argument
1531 return pgdat->node_start_pfn + pgdat->node_spanned_pages; in pgdat_end_pfn()
1536 void build_all_zonelists(pg_data_t *pgdat);
1562 return lruvec->pgdat; in lruvec_pgdat()
1676 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
1683 #define for_each_online_pgdat(pgdat) \ argument
1684 for (pgdat = first_online_pgdat(); \
[all …]
/linux/mm/damon/
ops-common.c
301 struct list_head *migrate_folios, struct pglist_data *pgdat, in __damon_migrate_folio_list() argument
316 if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE) in __damon_migrate_folio_list()
331 struct pglist_data *pgdat, in damon_migrate_folio_list() argument
361 &migrate_folios, pgdat, target_nid); in damon_migrate_folio_list()
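
Note: the guard visible in __damon_migrate_folio_list() is worth making explicit: migrating folios to the node they already sit on, or to no node at all, is a no-op. Hedged restatement (helper name invented):

    #include <linux/mmzone.h>
    #include <linux/numa.h>

    /* Mirrors the early-exit condition in the fragment above. */
    static bool migration_would_be_noop(struct pglist_data *src_pgdat,
                                        int target_nid)
    {
        return src_pgdat->node_id == target_nid ||
               target_nid == NUMA_NO_NODE;
    }
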
/linux/tools/perf/util/bpf_skel/
lock_contention.bpf.c
893 struct pglist_data *pgdat = NULL; in collect_zone_lock() local
896 err = bpf_core_read(&pgdat, sizeof(pgdat), &node_data[i]); in collect_zone_lock()
897 if (err < 0 || pgdat == NULL) in collect_zone_lock()
900 nr_zones = BPF_CORE_READ(pgdat, nr_zones); in collect_zone_lock()
907 zone_addr = (__u64)(void *)pgdat + (sizeof_zone * k) + zone_off; in collect_zone_lock()
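
Note: collect_zone_lock() cannot dereference kernel pointers directly: the verifier only admits reads that go through the CO-RE helpers, and struct zone's size and offsets are computed at load time rather than taken from a fixed header. A hedged reduction of the pattern (assumes vmlinux.h and libbpf; the node_data __ksym declaration follows what such BPF programs typically use, not necessarily this file verbatim):

    #include "vmlinux.h"
    #include <bpf/bpf_core_read.h>

    extern struct pglist_data *node_data[] __ksym;  /* kernel symbol */

    /* Illustrative only: fetch one node's pgdat pointer and read a
     * scalar field through CO-RE, as the fragment above does. */
    static int nr_zones_of_node(int nid)
    {
        struct pglist_data *pgdat = NULL;

        if (bpf_core_read(&pgdat, sizeof(pgdat), &node_data[nid]) < 0 ||
            pgdat == NULL)
            return 0;

        return BPF_CORE_READ(pgdat, nr_zones);
    }
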
/linux/tools/testing/memblock/
mmzone.c
9 struct pglist_data *next_online_pgdat(struct pglist_data *pgdat) in next_online_pgdat() argument
