| /linux/include/linux/ |
| cpuset.h |
|   224  #define cpuset_current_mems_allowed (node_states[N_MEMORY])
|   304  nodes_copy(*mask, node_states[N_MEMORY]);  in cpuset_nodes_allowed()
|
| /linux/mm/ |
| memory-tiers.c |
|   458  nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes);  in establish_demotion_targets()
|   489  if (nodes_and(tier_nodes, node_states[N_CPU], tier_nodes)) {  in establish_demotion_targets()
|   505  lower_tier = node_states[N_MEMORY];  in establish_demotion_targets()
|   931  nodes_and(default_dram_nodes, node_states[N_MEMORY],  in memory_tier_init()
|   932  node_states[N_CPU]);  in memory_tier_init()
|
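The memory-tiers.c hits combine node_states[N_MEMORY] and node_states[N_CPU] with nodemask set operations to tell CPU-backed DRAM nodes apart from CPU-less, memory-only nodes (the usual demotion targets). A minimal sketch of that pattern follows; the helper name report_cpuless_memory_nodes() is made up for illustration and is not in the tree:

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: split the nodes that have memory by whether they also have CPUs. */
    static void report_cpuless_memory_nodes(void)
    {
            nodemask_t with_cpu = NODE_MASK_NONE;
            nodemask_t cpuless = NODE_MASK_NONE;

            /* nodes that have both memory and CPUs */
            nodes_and(with_cpu, node_states[N_MEMORY], node_states[N_CPU]);
            /* memory-only nodes, e.g. CXL- or persistent-memory-backed ones */
            nodes_andnot(cpuless, node_states[N_MEMORY], node_states[N_CPU]);

            pr_info("memory+cpu nodes: %*pbl, cpu-less memory nodes: %*pbl\n",
                    nodemask_pr_args(&with_cpu), nodemask_pr_args(&cpuless));
    }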
| hugetlb_sysfs.c |
|   200  n_mask = &node_states[N_MEMORY];  in demote_store()
|
| mm_init.c |
|   374  nodemask_t saved_node_state = node_states[N_MEMORY];  in find_zone_movable_pfns_for_nodes()
|   376  int usable_nodes = nodes_weight(node_states[N_MEMORY]);  in find_zone_movable_pfns_for_nodes()
|   589  node_states[N_MEMORY] = saved_node_state;  in find_zone_movable_pfns_for_nodes()
|
| mempolicy.c |
|   419  cpuset_current_mems_allowed, node_states[N_MEMORY]);  in mpol_set_nodemask()
|   2090 * policy->nodes is intersect with node_states[N_MEMORY].  in apply_policy_zone()
|   2094 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY]))  in apply_policy_zone()
|   3436 if (!nodes_subset(nodes, node_states[N_MEMORY]))  in mpol_parse_str()
|   3468 nodes = node_states[N_MEMORY];  in mpol_parse_str()
|
| oom_kill.c |
|   284  !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) {  in constrained_alloc()
|
| hugetlb.c |
|   2397 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
|   3403 &node_states[N_MEMORY], NULL);  in gather_bootmem_prealloc_parallel()
|   3409 &node_states[N_MEMORY], NULL);  in gather_bootmem_prealloc()
|   3477 folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],  in hugetlb_hstate_alloc_pages_specific_nodes()
|   4079 n_mask = &node_states[N_MEMORY];  in demote_pool_huge_page()
|
| memory_hotplug.c |
|   1841 nodemask_t nmask = node_states[N_MEMORY];  in do_migrate_range()
|
| page_alloc.c |
|   188  nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
|   200  EXPORT_SYMBOL(node_states);
|   224  nodemask_t node_states[NR_NODE_STATES] __read_mostly = {  (global variable)
|
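node_states[] itself is the array referenced by every other hit in this listing: one nodemask_t per enum node_states value (N_POSSIBLE, N_ONLINE, N_MEMORY, N_CPU, ...), defined and exported by page_alloc.c. As a hedged sketch of how consumers usually read it via node_state() and for_each_node_state(); dump_memory_nodes() is a made-up name, not kernel code:

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical debugging helper: walk every node currently flagged as having memory. */
    static void dump_memory_nodes(void)
    {
            int nid;

            /* for_each_node_state() iterates the nodemask node_states[N_MEMORY] */
            for_each_node_state(nid, N_MEMORY)
                    pr_info("node %d: online=%d has_cpu=%d\n",
                            nid, node_state(nid, N_ONLINE), node_state(nid, N_CPU));
    }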
| /linux/drivers/base/ |
| node.c |
|   944  enum node_states state;  in show_node_state()
|   953  nodemask_pr_args(&node_states[na->state]));
|
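show_node_state() in drivers/base/node.c is what backs the per-state files under /sys/devices/system/node/ (online, possible, has_memory, has_cpu, ...). The printing idiom it uses, "%*pbl" plus nodemask_pr_args(), emits a node list such as "0-1,4". A small sketch of that idiom; the function name below is hypothetical:

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: print the nodemask for one node_states entry as a node list. */
    static void print_node_state(enum node_states state)
    {
            pr_info("nodes in state %d: %*pbl\n",
                    state, nodemask_pr_args(&node_states[state]));
    }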
| /linux/kernel/cgroup/ |
| cpuset.c |
|   500  while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]))  in guarantee_online_mems()
|   3869 new_mems = node_states[N_MEMORY];  in cpuset_handle_hotplug()
|   3996 top_cpuset.effective_mems = node_states[N_MEMORY];  in cpuset_init_smp()
|   4266 nodes_copy(*mask, node_states[N_MEMORY]);  in cpuset_nodes_allowed()
|   4272 nodes_copy(*mask, node_states[N_MEMORY]);  in cpuset_nodes_allowed()
|
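The cpuset.c hits show node_states[N_MEMORY] used as the ultimate fallback mask: guarantee_online_mems() keeps only nodes that actually have memory (walking up the cpuset hierarchy when a cpuset's effective_mems has been emptied by hotplug), and the top cpuset's effective_mems is resynced to N_MEMORY on hotplug events. Below is a simplified sketch of the fallback idea under that assumption; online_mems_or_fallback() is illustrative and collapses the real hierarchy walk into a single step:

    #include <linux/nodemask.h>

    /* Hypothetical helper: intersect a preferred mask with the nodes that have memory,
     * falling back to all memory nodes if nothing is left. */
    static void online_mems_or_fallback(const nodemask_t *preferred, nodemask_t *pmask)
    {
            /* keep only the preferred nodes that currently have memory */
            if (!nodes_and(*pmask, *preferred, node_states[N_MEMORY]))
                    nodes_copy(*pmask, node_states[N_MEMORY]);  /* nothing left: use every memory node */
    }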
| /linux/kernel/sched/ |
| ext_idle.c |
|   162  nodes_copy(*unvisited, node_states[N_ONLINE]);  in pick_idle_cpu_from_online_nodes()
|
| fair.c |
|   3000 nodes = node_states[N_CPU];  in task_numa_placement()
|
| /linux/init/ |
| main.c |
|   1682 set_mems_allowed(node_states[N_MEMORY]);  in kernel_init_freeable()
|
| /linux/kernel/ |
| padata.c |
|   491  nid = next_node_in(old_node, node_states[N_CPU]);  in padata_do_multithreaded()
|
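The padata.c hit is the round-robin pattern: pick the node after the previously used one, wrapping around the node_states[N_CPU] mask. A tiny sketch under that assumption; pick_next_cpu_node() and prev_nid are illustrative names only:

    #include <linux/nodemask.h>

    /* Hypothetical helper: round-robin over the nodes that have CPUs.
     * next_node_in() wraps back to the first set node; it returns MAX_NUMNODES
     * only if the mask is empty. */
    static int pick_next_cpu_node(int prev_nid)
    {
            return next_node_in(prev_nid, node_states[N_CPU]);
    }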
| kthread.c |
|   795  set_mems_allowed(node_states[N_MEMORY]);  in kthreadd()
|
| /linux/kernel/liveupdate/ |
| kexec_handover.c |
|   659  kho_scratch_cnt = nodes_weight(node_states[N_MEMORY]) + 2;  in kho_reserve_scratch()
|
| /linux/Documentation/admin-guide/kdump/ |
| vmcoreinfo.rst |
|   51   An array node_states[N_ONLINE] which represents the set of online nodes
|
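The vmcoreinfo documentation describes node_states[N_ONLINE] as the set of online nodes; inside the kernel, node_online(), for_each_online_node() and num_online_nodes() are thin wrappers over that same nodemask. A minimal sketch; count_online_nodes() is a hypothetical name:

    #include <linux/nodemask.h>
    #include <linux/printk.h>

    /* Hypothetical helper: count online nodes by walking node_states[N_ONLINE]. */
    static void count_online_nodes(void)
    {
            int nid, count = 0;

            for_each_online_node(nid)       /* iterates node_states[N_ONLINE] */
                    count++;

            pr_info("%d online nodes (num_online_nodes()=%d)\n",
                    count, num_online_nodes());
    }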
| /linux/fs/proc/ |
| task_mmu.c |
|   3186 if (!node_isset(nid, node_states[N_MEMORY]))  in can_gather_numa_stats()
|   3211 if (!node_isset(nid, node_states[N_MEMORY]))  in can_gather_numa_stats_pmd()
|
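The task_mmu.c hits are the guard used when gathering per-node NUMA statistics: a page is only attributed to a node counter if its node is currently flagged in node_states[N_MEMORY]. A short sketch of that guard; page_node_has_memory() is an illustrative name, not the kernel function:

    #include <linux/mm.h>
    #include <linux/nodemask.h>

    /* Hypothetical helper: should this page be charged to a per-node stat? */
    static bool page_node_has_memory(const struct page *page)
    {
            int nid = page_to_nid(page);

            /* only account pages whose node currently has memory */
            return node_isset(nid, node_states[N_MEMORY]);
    }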
| /linux/Documentation/admin-guide/cgroup-v1/ |
| cpusets.rst |
|   225  automatically tracks the value of node_states[N_MEMORY]--i.e.,
|