Searched refs: node_states (Results 1 – 20 of 20) sorted by relevance
| /linux/include/linux/ |
| H A D | cpuset.h | 224 #define cpuset_current_mems_allowed (node_states[N_MEMORY]) 304 nodes_copy(*mask, node_states[N_MEMORY]); in cpuset_nodes_allowed()
|
| /linux/mm/ |
| H A D | memory-tiers.c | 458 nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes); in establish_demotion_targets() 489 if (nodes_and(tier_nodes, node_states[N_CPU], tier_nodes)) { in establish_demotion_targets() 505 lower_tier = node_states[N_MEMORY]; in establish_demotion_targets() 931 nodes_and(default_dram_nodes, node_states[N_MEMORY], in memory_tier_init() 932 node_states[N_CPU]); in memory_tier_init()
|
| H A D | hugetlb_sysfs.c | 200 n_mask = &node_states[N_MEMORY]; in demote_store()
|
| H A D | mm_init.c | 363 nodemask_t saved_node_state = node_states[N_MEMORY]; in find_zone_movable_pfns_for_nodes() 365 int usable_nodes = nodes_weight(node_states[N_MEMORY]); in find_zone_movable_pfns_for_nodes() 578 node_states[N_MEMORY] = saved_node_state; in find_zone_movable_pfns_for_nodes()
|
| H A D | mempolicy.c | 419 cpuset_current_mems_allowed, node_states[N_MEMORY]); in mpol_set_nodemask() 2088 if (!nodes_intersects(policy->nodes, node_states[N_HIGH_MEMORY])) in apply_policy_zone() 3430 if (!nodes_subset(nodes, node_states[N_MEMORY])) in mpol_parse_str() 3462 nodes = node_states[N_MEMORY]; in mpol_parse_str()
|
| H A D | oom_kill.c | 287 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { in constrained_alloc()
|
| H A D | hugetlb.c | 2433 folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1); in return_unused_surplus_pages() 3437 &node_states[N_MEMORY], NULL); in hugetlb_hstate_alloc_pages_onenode() 3443 &node_states[N_MEMORY], NULL); in hugetlb_hstate_alloc_pages_onenode() 3511 folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY], in hugetlb_pages_alloc_boot_node() 4113 n_mask = &node_states[N_MEMORY]; in __nr_hugepages_store_common()
|
| H A D | memory_hotplug.c | 1837 nodemask_t nmask = node_states[N_MEMORY]; in do_migrate_range()
|
| H A D | page_alloc.c | 224 nodemask_t node_states[NR_NODE_STATES] __read_mostly = { variable 236 EXPORT_SYMBOL(node_states);
|
| /linux/drivers/base/ |
| H A D | node.c | 939 enum node_states state; 948 nodemask_pr_args(&node_states[na->state])); in show_node_state()
|
| /linux/kernel/cgroup/ |
| H A D | cpuset.c | 461 while (!nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY])) in guarantee_online_mems() 3795 new_mems = node_states[N_MEMORY]; in cpuset_handle_hotplug() 3906 top_cpuset.effective_mems = node_states[N_MEMORY]; in cpuset_init_smp() 4176 nodes_copy(*mask, node_states[N_MEMORY]); in cpuset_nodes_allowed() 4182 nodes_copy(*mask, node_states[N_MEMORY]); in cpuset_nodes_allowed()
|
| /linux/kernel/sched/ |
| H A D | ext_idle.c | 162 nodes_copy(*unvisited, node_states[N_ONLINE]); in pick_idle_cpu_from_online_nodes()
|
| H A D | fair.c | 2851 nodes = node_states[N_CPU]; in preferred_group_nid()
|
| /linux/init/ |
| H A D | main.c | 1671 set_mems_allowed(node_states[N_MEMORY]); in kernel_init_freeable()
|
| /linux/kernel/ |
| H A D | padata.c | 491 nid = next_node_in(old_node, node_states[N_CPU]); in padata_do_multithreaded()
|
| H A D | kthread.c | 826 set_mems_allowed(node_states[N_MEMORY]); in kthreadd()
|
| /linux/kernel/liveupdate/ |
| H A D | kexec_handover.c | 658 kho_scratch_cnt = nodes_weight(node_states[N_MEMORY]) + 2; in kho_reserve_scratch()
|
| /linux/Documentation/admin-guide/kdump/ |
| H A D | vmcoreinfo.rst | 51 An array node_states[N_ONLINE] which represents the set of online nodes
|
| /linux/fs/proc/ |
| H A D | task_mmu.c | 3186 if (!node_isset(nid, node_states[N_MEMORY])) in can_gather_numa_stats() 3211 if (!node_isset(nid, node_states[N_MEMORY])) in can_gather_numa_stats_pmd()
|
| /linux/Documentation/admin-guide/cgroup-v1/ |
| H A D | cpusets.rst | 225 automatically tracks the value of node_states[N_MEMORY]--i.e.,
|