/linux/mm/
vmscan.c
  494  static bool skip_throttle_noprogress(pg_data_t *pgdat)  in skip_throttle_noprogress() argument
  503      if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)  in skip_throttle_noprogress()
  512      struct zone *zone = pgdat->node_zones + i;  in skip_throttle_noprogress()
  527  void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)  in reclaim_throttle() argument
  529      wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];  in reclaim_throttle()
  558      if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {  in reclaim_throttle()
  559          WRITE_ONCE(pgdat->nr_reclaim_start,  in reclaim_throttle()
  560              node_page_state(pgdat, NR_THROTTLED_WRITTEN));  in reclaim_throttle()
  567      if (skip_throttle_noprogress(pgdat)) {  in reclaim_throttle()
  589      atomic_dec(&pgdat->nr_writeback_throttled);  in reclaim_throttle()
  [all …]
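The vmscan.c hits above show reclaim throttling keyed off pgdat-wide state. Below is a toy userspace model of the skip_throttle_noprogress() idea, a sketch under the assumption that throttling only pays off while most of a node's reclaimable pages are still waiting on writeback; every struct and field name here is an invented stand-in, not a kernel type.

/* Toy model: skip throttling when kswapd keeps failing, or when lack of
 * reclaim progress does not look like a writeback problem. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_NR_ZONES        4
#define MAX_RECLAIM_RETRIES 16

struct toy_zone  { long reclaimable, write_pending; };
struct toy_pgdat {
    int kswapd_failures;
    struct toy_zone node_zones[MAX_NR_ZONES];
};

static bool skip_throttle_noprogress(const struct toy_pgdat *pgdat)
{
    long reclaimable = 0, write_pending = 0;

    /* Once kswapd has failed repeatedly, direct reclaim is the only way
     * to make progress, so do not throttle it. */
    if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
        return true;

    for (int i = 0; i < MAX_NR_ZONES; i++) {
        const struct toy_zone *zone = &pgdat->node_zones[i];

        reclaimable   += zone->reclaimable;
        write_pending += zone->write_pending;
    }

    /* If under half of the reclaimable pages are write-pending, stalling
     * on writeback would not help: skip throttling. */
    return 2 * write_pending <= reclaimable;
}

int main(void)
{
    struct toy_pgdat node = {
        .kswapd_failures = 0,
        .node_zones = { { .reclaimable = 1000, .write_pending = 100 } },
    };
    printf("skip throttle: %s\n",
           skip_throttle_noprogress(&node) ? "yes" : "no");
    return 0;
}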
vmstat.c
  274      struct pglist_data *pgdat;  in refresh_zone_stat_thresholds() local
  280      for_each_online_pgdat(pgdat) {  in refresh_zone_stat_thresholds()
  282          per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold = 0;  in refresh_zone_stat_thresholds()
  287      struct pglist_data *pgdat = zone->zone_pgdat;  in refresh_zone_stat_thresholds() local
  299          pgdat_threshold = per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold;  in refresh_zone_stat_thresholds()
  300          per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->stat_threshold  in refresh_zone_stat_thresholds()
  317  void set_pgdat_percpu_threshold(pg_data_t *pgdat,  in set_pgdat_percpu_threshold() argument
  325      for (i = 0; i < pgdat->nr_zones; i++) {  in set_pgdat_percpu_threshold()
  326          zone = &pgdat->node_zones[i];  in set_pgdat_percpu_threshold()
  373  void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,  in __mod_node_page_state() argument
  [all …]
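The vmstat.c and vmstat.h hits revolve around per-CPU stat batching: small deltas accumulate in a per-CPU scratch value and are folded into the node-wide atomic counter only when they cross a threshold. A minimal single-CPU sketch of that pattern with invented toy types (the kernel's real logic lives in __mod_node_page_state()):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_pcpu { long delta; int stat_threshold; };

struct toy_node_stat {
    atomic_long vm_stat;    /* node-wide truth, updated rarely */
    struct toy_pcpu pcpu;   /* one instance per CPU in the real kernel */
};

static void mod_node_stat(struct toy_node_stat *s, long delta)
{
    struct toy_pcpu *p = &s->pcpu;      /* would be this_cpu_ptr() */
    long x = p->delta + delta;

    if (labs(x) > p->stat_threshold) {  /* fold into the global counter */
        atomic_fetch_add(&s->vm_stat, x);
        x = 0;
    }
    p->delta = x;
}

int main(void)
{
    struct toy_node_stat s = { .pcpu = { .delta = 0, .stat_threshold = 32 } };

    atomic_init(&s.vm_stat, 0);
    for (int i = 0; i < 100; i++)
        mod_node_stat(&s, 1);
    /* Most of the 100 increments were absorbed locally. */
    printf("global=%ld local=%ld\n", atomic_load(&s.vm_stat), s.pcpu.delta);
    return 0;
}

This is also why refresh_zone_stat_thresholds() and set_pgdat_percpu_threshold() exist in the hits above: the threshold trades accuracy of the global counter against cacheline traffic, so it has to be retuned when memory or CPUs come and go.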
compaction.c
  435  void reset_isolation_suitable(pg_data_t *pgdat)  in reset_isolation_suitable() argument
  440      struct zone *zone = &pgdat->node_zones[zoneid];  in reset_isolation_suitable()
  796      pg_data_t *pgdat = cc->zone->zone_pgdat;  in too_many_isolated() local
  801      inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +  in too_many_isolated()
  802          node_page_state(pgdat, NR_INACTIVE_ANON);  in too_many_isolated()
  803      active = node_page_state(pgdat, NR_ACTIVE_FILE) +  in too_many_isolated()
  804          node_page_state(pgdat, NR_ACTIVE_ANON);  in too_many_isolated()
  805      isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +  in too_many_isolated()
  806          node_page_state(pgdat, NR_ISOLATED_ANON);  in too_many_isolated()
  821      wake_throttle_isolated(pgdat);  in too_many_isolated()
  [all …]
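The too_many_isolated() hits show compaction backing off when page isolation runs ahead of what the LRU lists can still supply. A self-contained sketch of the same comparison, with plain longs standing in for the node_page_state() lookups above:

#include <stdbool.h>

struct toy_lru_counts {
    long inactive_anon, inactive_file;
    long active_anon, active_file;
    long isolated_anon, isolated_file;
};

bool too_many_isolated(const struct toy_lru_counts *c)
{
    long inactive = c->inactive_anon + c->inactive_file;
    long active   = c->active_anon   + c->active_file;
    long isolated = c->isolated_anon + c->isolated_file;

    /* Same shape as the kernel check: throttle once isolated pages
     * outnumber half of the remaining inactive + active pages. */
    return isolated > (inactive + active) / 2;
}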
mmzone.c
  18   struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)  in next_online_pgdat() argument
  20       int nid = next_online_node(pgdat->node_id);  in next_online_pgdat()
  32       pg_data_t *pgdat = zone->zone_pgdat;  in next_zone() local
  34       if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)  in next_zone()
  37           pgdat = next_online_pgdat(pgdat);  in next_zone()
  38       if (pgdat)  in next_zone()
  39           zone = pgdat->node_zones;  in next_zone()
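The mmzone.c hits define the node/zone walk behind for_each_zone(): next_zone() advances through one node's zones, then hops to the first zone of the next online node. A compilable toy model of that traversal, with fixed-size arrays standing in for node_data[] and the online-node mask:

#include <stddef.h>
#include <stdio.h>

#define MAX_NR_ZONES 3
#define NR_NODES     2

struct toy_zone { int node_id, idx; };
struct toy_node { struct toy_zone node_zones[MAX_NR_ZONES]; };

static struct toy_node nodes[NR_NODES];

static struct toy_node *first_online_pgdat(void) { return &nodes[0]; }

static struct toy_node *next_online_pgdat(struct toy_node *pgdat)
{
    size_t nid = (size_t)(pgdat - nodes) + 1;   /* next_online_node() */

    return nid < NR_NODES ? &nodes[nid] : NULL;
}

/* Mirrors next_zone(): stay inside the current node while zones remain,
 * otherwise restart at the first zone of the next online node. */
static struct toy_zone *next_zone(struct toy_zone *zone)
{
    struct toy_node *pgdat = &nodes[zone->node_id];

    if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
        return zone + 1;
    pgdat = next_online_pgdat(pgdat);
    return pgdat ? pgdat->node_zones : NULL;
}

int main(void)
{
    for (int n = 0; n < NR_NODES; n++)
        for (int z = 0; z < MAX_NR_ZONES; z++)
            nodes[n].node_zones[z] = (struct toy_zone){ n, z };

    for (struct toy_zone *z = first_online_pgdat()->node_zones;
         z; z = next_zone(z))
        printf("node %d zone %d\n", z->node_id, z->idx);
    return 0;
}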
numa.c
  39       pg_data_t *pgdat;  in alloc_offline_node_data() local
  40       node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES);  in alloc_offline_node_data()
shuffle.h
  11   extern void __shuffle_free_memory(pg_data_t *pgdat);
  13   static inline void __meminit shuffle_free_memory(pg_data_t *pgdat)  in shuffle_free_memory() argument
  17       __shuffle_free_memory(pgdat);  in shuffle_free_memory()
  40   static inline void shuffle_free_memory(pg_data_t *pgdat)  in shuffle_free_memory() argument
memory-tiers.c
  262      pg_data_t *pgdat;  in __node_get_memory_tier() local
  264      pgdat = NODE_DATA(node);  in __node_get_memory_tier()
  265      if (!pgdat)  in __node_get_memory_tier()
  272      return rcu_dereference_check(pgdat->memtier,  in __node_get_memory_tier()
  280      pg_data_t *pgdat;  in node_is_toptier() local
  283      pgdat = NODE_DATA(node);  in node_is_toptier()
  284      if (!pgdat)  in node_is_toptier()
  288      memtier = rcu_dereference(pgdat->memtier);  in node_is_toptier()
  302  void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  in node_get_allowed_targets() argument
  312      memtier = rcu_dereference(pgdat->memtier);  in node_get_allowed_targets()
  [all …]
memory_hotplug.c
  518  static void update_pgdat_span(struct pglist_data *pgdat)  in update_pgdat_span() argument
  523      for (zone = pgdat->node_zones;  in update_pgdat_span()
  524           zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {  in update_pgdat_span()
  542      pgdat->node_start_pfn = node_start_pfn;  in update_pgdat_span()
  543      pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;  in update_pgdat_span()
  551      struct pglist_data *pgdat = zone->zone_pgdat;  in remove_pfn_range_from_zone() local
  576      update_pgdat_span(pgdat);  in remove_pfn_range_from_zone()
  737  static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn,  in resize_pgdat_range() argument
  740      unsigned long old_end_pfn = pgdat_end_pfn(pgdat);  in resize_pgdat_range()
  742      if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)  in resize_pgdat_range()
  [all …]
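update_pgdat_span() in the hits above recomputes a node's span after hot-remove as the union of its remaining zone spans, skipping empty zones. A sketch with simplified types and no locking (the real code runs under the pgdat resize protection shown in the memory_hotplug.h entry further down):

#include <limits.h>

#define MAX_NR_ZONES 4

struct toy_zone  { unsigned long zone_start_pfn, spanned_pages; };
struct toy_pgdat {
    unsigned long node_start_pfn, node_spanned_pages;
    struct toy_zone node_zones[MAX_NR_ZONES];
};

void update_pgdat_span(struct toy_pgdat *pgdat)
{
    unsigned long start = ULONG_MAX, end = 0;

    for (int i = 0; i < MAX_NR_ZONES; i++) {
        struct toy_zone *z = &pgdat->node_zones[i];
        unsigned long zend = z->zone_start_pfn + z->spanned_pages;

        if (!z->spanned_pages)          /* ignore empty zones */
            continue;
        if (z->zone_start_pfn < start)
            start = z->zone_start_pfn;
        if (zend > end)
            end = zend;
    }
    if (end == 0)                       /* every zone is gone */
        start = 0;
    pgdat->node_start_pfn = start;
    pgdat->node_spanned_pages = end - start;
}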
shuffle.c
  153  void __meminit __shuffle_free_memory(pg_data_t *pgdat)  in __shuffle_free_memory() argument
  157      for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)  in __shuffle_free_memory()
page_owner.c
  414          pg_data_t *pgdat, struct zone *zone)  in pagetypeinfo_showmixedcount_print() argument
  490      seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);  in pagetypeinfo_showmixedcount_print()
  760  static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)  in init_pages_in_zone() argument
  827          pgdat->node_id, zone->name, count);  in init_pages_in_zone()
  830  static void init_zones_in_node(pg_data_t *pgdat)  in init_zones_in_node() argument
  833      struct zone *node_zones = pgdat->node_zones;  in init_zones_in_node()
  839          init_pages_in_zone(pgdat, zone);  in init_zones_in_node()
  845      pg_data_t *pgdat;  in init_early_allocated_pages() local
  847      for_each_online_pgdat(pgdat)  in init_early_allocated_pages()
  848          init_zones_in_node(pgdat);  in init_early_allocated_pages()
  [all …]
page_alloc.c
  5010 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)  in build_zonerefs_node() argument
  5018     zone = pgdat->node_zones + zone_type;  in build_zonerefs_node()
  5128 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,  in build_zonelists_in_node_order() argument
  5134     zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;  in build_zonelists_in_node_order()
  5151 static void build_thisnode_zonelists(pg_data_t *pgdat)  in build_thisnode_zonelists() argument
  5156     zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;  in build_thisnode_zonelists()
  5157     nr_zones = build_zonerefs_node(pgdat, zonerefs);  in build_thisnode_zonelists()
  5163 static void build_zonelists(pg_data_t *pgdat)  in build_zonelists() argument
  5171     local_node = pgdat->node_id;  in build_zonelists()
  5189     build_zonelists_in_node_order(pgdat, node_order, nr_nodes);  in build_zonelists()
  [all …]
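build_zonerefs_node() in the page_alloc.c hits appends one node's populated zones to a zonelist, highest zone type first; build_zonelists() then repeats this for each node in fallback order. A simplified sketch of that single step, with a managed_pages field standing in for the kernel's managed_zone() test:

#define MAX_NR_ZONES 4

struct toy_zone    { unsigned long managed_pages; };
struct toy_zoneref { struct toy_zone *zone; int zone_idx; };
struct toy_pgdat   { struct toy_zone node_zones[MAX_NR_ZONES]; };

int build_zonerefs_node(struct toy_pgdat *pgdat, struct toy_zoneref *zonerefs)
{
    int nr_zones = 0;

    /* Highest zone first, so the allocator prefers e.g. NORMAL over DMA
     * and only falls back to lower zones when it has to. */
    for (int zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
        struct toy_zone *zone = &pgdat->node_zones[zone_type];

        if (!zone->managed_pages)   /* managed_zone() in the kernel */
            continue;
        zonerefs[nr_zones++] = (struct toy_zoneref){ zone, zone_type };
    }
    return nr_zones;                /* entries written */
}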
internal.h
  349  void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
  353      pg_data_t *pgdat = folio_pgdat(folio);  in acct_reclaim_writeback() local
  354      int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);  in acct_reclaim_writeback()
  357      __acct_reclaim_writeback(pgdat, folio, nr_throttled);  in acct_reclaim_writeback()
  360  static inline void wake_throttle_isolated(pg_data_t *pgdat)  in wake_throttle_isolated() argument
  364      wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];  in wake_throttle_isolated()
  478  extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
  1103 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask,  in node_reclaim() argument
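acct_reclaim_writeback() in the internal.h hits is a fast-path wrapper: a cheap atomic read gates the slow path, so the hot writeback completion path pays almost nothing while nobody is throttled. A standalone sketch of that pattern; the helper name slow_acct_reclaim_writeback() is made up for illustration:

#include <stdatomic.h>

static atomic_int nr_writeback_throttled;   /* a pgdat field in the kernel */

static void slow_acct_reclaim_writeback(int nr_throttled)
{
    /* Kernel equivalent: bump NR_THROTTLED_WRITTEN and, once enough
     * pages have been written back, wake the throttled reclaimers. */
    (void)nr_throttled;
}

static inline void acct_reclaim_writeback(void)
{
    int nr_throttled = atomic_load(&nr_writeback_throttled);

    if (nr_throttled)          /* almost always false: near-zero cost */
        slow_acct_reclaim_writeback(nr_throttled);
}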
page_ext.c
  163  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)  in pgdat_page_ext_init() argument
  165      pgdat->node_page_ext = NULL;  in pgdat_page_ext_init()
  504  void __meminit pgdat_page_ext_init(struct pglist_data *pgdat)  in pgdat_page_ext_init() argument
page-writeback.c
  274  static unsigned long node_dirtyable_memory(struct pglist_data *pgdat)  in node_dirtyable_memory() argument
  280      struct zone *zone = pgdat->node_zones + z;  in node_dirtyable_memory()
  293      nr_pages -= min(nr_pages, pgdat->totalreserve_pages);  in node_dirtyable_memory()
  295      nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);  in node_dirtyable_memory()
  296      nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);  in node_dirtyable_memory()
  468  static unsigned long node_dirty_limit(struct pglist_data *pgdat)  in node_dirty_limit() argument
  470      unsigned long node_memory = node_dirtyable_memory(pgdat);  in node_dirty_limit()
  497  bool node_dirty_ok(struct pglist_data *pgdat)  in node_dirty_ok() argument
  499      unsigned long limit = node_dirty_limit(pgdat);  in node_dirty_ok()
  502      nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);  in node_dirty_ok()
  [all …]
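The page-writeback.c hits are plain arithmetic over node counters: dirtyable memory is free pages minus reserves plus file LRU pages, and node_dirty_ok() compares dirty plus writeback pages against a ratio of that. A worked sketch, assuming a 20% dirty ratio as a stand-in for vm.dirty_ratio (not a kernel constant):

#include <stdbool.h>

struct toy_dirty_state {
    unsigned long free_pages, totalreserve_pages;
    unsigned long inactive_file, active_file;
    unsigned long nr_dirty, nr_writeback;
};

unsigned long node_dirtyable_memory(const struct toy_dirty_state *n)
{
    unsigned long pages = n->free_pages;

    /* Reserves are never dirtyable; clamp like min() in the kernel. */
    pages -= pages < n->totalreserve_pages ? pages : n->totalreserve_pages;
    return pages + n->inactive_file + n->active_file;
}

bool node_dirty_ok(const struct toy_dirty_state *n)
{
    /* Assumed 20% ratio; the kernel derives this from dirty_ratio. */
    unsigned long limit = node_dirtyable_memory(n) * 20 / 100;

    return n->nr_dirty + n->nr_writeback <= limit;
}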
migrate.c
  2601 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,  in migrate_balanced_pgdat() argument
  2606     for (z = pgdat->nr_zones - 1; z >= 0; z--) {  in migrate_balanced_pgdat()
  2607         struct zone *zone = pgdat->node_zones + z;  in migrate_balanced_pgdat()
  2648     pg_data_t *pgdat = NODE_DATA(node);  in migrate_misplaced_folio_prepare() local
  2673     if (!migrate_balanced_pgdat(pgdat, nr_pages)) {  in migrate_misplaced_folio_prepare()
  2678     for (z = pgdat->nr_zones - 1; z >= 0; z--) {  in migrate_misplaced_folio_prepare()
  2679         if (managed_zone(pgdat->node_zones + z))  in migrate_misplaced_folio_prepare()
  2690         wakeup_kswapd(pgdat->node_zones + z, 0,  in migrate_misplaced_folio_prepare()
  2712     pg_data_t *pgdat = NODE_DATA(node);  in migrate_misplaced_folio() local
  2717     struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);  in migrate_misplaced_folio()
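migrate_misplaced_folio_prepare() only admits a NUMA-balancing migration if migrate_balanced_pgdat() finds a zone on the target node that stays above its watermark after the move; otherwise it wakes kswapd and gives up. A simplified model of the admission check, with an explicit high_wmark field replacing the kernel's zone_watermark_ok() call:

#include <stdbool.h>

#define MAX_NR_ZONES 4

struct toy_zone  { unsigned long free_pages, high_wmark, managed_pages; };
struct toy_pgdat { int nr_zones; struct toy_zone node_zones[MAX_NR_ZONES]; };

bool migrate_balanced_pgdat(const struct toy_pgdat *pgdat,
                            unsigned long nr_pages)
{
    /* Scan from the highest populated zone down, like the kernel loop. */
    for (int z = pgdat->nr_zones - 1; z >= 0; z--) {
        const struct toy_zone *zone = &pgdat->node_zones[z];

        if (!zone->managed_pages)
            continue;
        /* Admit only if this zone survives the move above its mark. */
        if (zone->free_pages >= zone->high_wmark + nr_pages)
            return true;
    }
    return false;
}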
/linux/include/linux/
memory_hotplug.h
  180  static inline void pgdat_kswapd_lock(pg_data_t *pgdat)  in pgdat_kswapd_lock() argument
  182      mutex_lock(&pgdat->kswapd_lock);  in pgdat_kswapd_lock()
  185  static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)  in pgdat_kswapd_unlock() argument
  187      mutex_unlock(&pgdat->kswapd_lock);  in pgdat_kswapd_unlock()
  190  static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat)  in pgdat_kswapd_lock_init() argument
  192      mutex_init(&pgdat->kswapd_lock);  in pgdat_kswapd_lock_init()
  237  static inline void pgdat_kswapd_lock(pg_data_t *pgdat) {}  in pgdat_kswapd_lock() argument
  238  static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) {}  in pgdat_kswapd_unlock() argument
  239  static inline void pgdat_kswapd_lock_init(pg_data_t *pgdat) {}  in pgdat_kswapd_lock_init() argument
  251   * pgdat resizin…
  256  pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)  in pgdat_resize_lock() argument
  261  pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)  in pgdat_resize_unlock() argument
  266  pgdat_resize_init(struct pglist_data *pgdat)  in pgdat_resize_init() argument
  276  pgdat_resize_init(struct pglist_data *pgdat)  in pgdat_resize_init() argument
  [all …]
vmstat.h
  176  static inline void node_page_state_add(long x, struct pglist_data *pgdat,  in node_page_state_add() argument
  179      atomic_long_add(x, &pgdat->vm_stat[item]);  in node_page_state_add()
  265  extern unsigned long node_page_state(struct pglist_data *pgdat,
  267  extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
  315  void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  329  static inline void __mod_node_page_state(struct pglist_data *pgdat,  in __mod_node_page_state() argument
  343      node_page_state_add(delta, pgdat, item);  in __mod_node_page_state()
  352  static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)  in __inc_node_state() argument
  354      atomic_long_inc(&pgdat->vm_stat[item]);  in __inc_node_state()
  364  static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)  in __dec_node_state() argument
  [all …]
compaction.h
  96   extern void reset_isolation_suitable(pg_data_t *pgdat);
  108  extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
  111  static inline void reset_isolation_suitable(pg_data_t *pgdat)  in reset_isolation_suitable() argument
  128  static inline void wakeup_kcompactd(pg_data_t *pgdat,  in wakeup_kcompactd() argument
memory-tiers.h
  57   void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
  65   static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  in node_get_allowed_targets() argument
  109  static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  in node_get_allowed_targets() argument
node.h
  130      struct pglist_data *pgdat = NODE_DATA(nid);  in register_one_node() local
  131      unsigned long start_pfn = pgdat->node_start_pfn;  in register_one_node()
  132      unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;  in register_one_node()
/linux/tools/testing/vma/linux/
mmzone.h
  9    struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
  11   #define for_each_online_pgdat(pgdat) \  argument
  12       for (pgdat = first_online_pgdat(); \
  13            pgdat; \
  14            pgdat = next_online_pgdat(pgdat))
/linux/tools/testing/memblock/linux/
mmzone.h
  9    struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
  11   #define for_each_online_pgdat(pgdat) \  argument
  12       for (pgdat = first_online_pgdat(); \
  13            pgdat; \
  14            pgdat = next_online_pgdat(pgdat))
/linux/arch/sh/mm/
init.c
  334      pg_data_t *pgdat;  in mem_init() local
  337      for_each_online_pgdat(pgdat)  in mem_init()
  339              __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));  in mem_init()
/linux/mm/damon/
paddr.c
  341          struct list_head *migrate_folios, struct pglist_data *pgdat,  in __damon_pa_migrate_folio_list() argument
  358      if (pgdat->node_id == target_nid || target_nid == NUMA_NO_NODE)  in __damon_pa_migrate_folio_list()
  373          struct pglist_data *pgdat,  in damon_pa_migrate_folio_list() argument
  403          &migrate_folios, pgdat, target_nid);  in damon_pa_migrate_folio_list()
/linux/tools/testing/memblock/
mmzone.c
  9    struct pglist_data *next_online_pgdat(struct pglist_data *pgdat)  in next_online_pgdat() argument