Searched refs:nodemask (Results 1 – 25 of 31) sorted by relevance

/linux/mm/
show_mem.c
119 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) in show_mem_node_skip() argument
129 if (!nodemask) in show_mem_node_skip()
130 nodemask = &cpuset_current_mems_allowed; in show_mem_node_skip()
132 return !node_isset(nid, *nodemask); in show_mem_node_skip()
180 static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) in show_free_areas() argument
190 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
227 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) in show_free_areas()
291 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
362 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
392 if (show_mem_node_skip(filter, nid, nodemask)) in show_free_areas()
[all …]
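
The show_mem.c hits implement per-node filtering of the memory report. A minimal kernel-style sketch of that pattern, assuming the elided lines test SHOW_MEM_FILTER_NODES first; the function name node_filtered is made up for illustration:

#include <linux/mm.h>
#include <linux/cpuset.h>
#include <linux/nodemask.h>

/* Skip a node unless it is in the caller's nodemask; with no mask
 * supplied, fall back to the current task's cpuset-allowed nodes,
 * as the hits at 129-132 do. */
static bool node_filtered(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;
	return !node_isset(nid, *nodemask);
}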
hugetlb_cma.h
8 int nid, nodemask_t *nodemask);
22 int nid, nodemask_t *nodemask) in hugetlb_cma_alloc_folio() argument
mempolicy.c
1443 nodemask_t *nodemask; in alloc_migration_target_by_mpol() local
1448 nodemask = policy_nodemask(gfp, pol, ilx, &nid); in alloc_migration_target_by_mpol()
1449 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp, in alloc_migration_target_by_mpol()
2202 nodemask_t nodemask; in weighted_interleave_nid() local
2209 nr_nodes = read_once_policy_nodemask(pol, &nodemask); in weighted_interleave_nid()
2221 for_each_node_mask(nid, nodemask) in weighted_interleave_nid()
2226 nid = first_node(nodemask); in weighted_interleave_nid()
2233 nid = next_node_in(nid, nodemask); in weighted_interleave_nid()
2246 nodemask_t nodemask; in interleave_nid() local
2251 nnodes = read_once_policy_nodemask(pol, &nodemask); in interleave_nid()
[all …]
oom_kill.c
95 const nodemask_t *mask = oc->nodemask; in oom_cpuset_eligible()
286 if (oc->nodemask && in constrained_alloc()
287 !nodes_subset(node_states[N_MEMORY], *oc->nodemask)) { in constrained_alloc()
289 for_each_node_mask(nid, *oc->nodemask) in constrained_alloc()
296 highest_zoneidx, oc->nodemask) in constrained_alloc()
452 nodemask_pr_args(oc->nodemask)); in dump_oom_victim()
471 __show_mem(SHOW_MEM_FILTER_NODES, oc->nodemask, gfp_zone(oc->gfp_mask)); in dump_header()
1158 oc->nodemask = NULL; in out_of_memory()
page_alloc.c
3491 ac->nodemask) { in unreserve_highatomic_pageblock()
3810 ac->nodemask) { in get_page_from_freelist()
3988 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
4004 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); in warn_alloc_show_mem()
4008 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
4024 nodemask_pr_args(nodemask)); in warn_alloc()
4030 warn_alloc_show_mem(gfp_mask, nodemask); in warn_alloc()
4058 .nodemask = ac->nodemask, in __alloc_pages_may_oom()
4300 ac->highest_zoneidx, ac->nodemask) { in should_compact_retry()
4408 ac->nodemask); in __perform_reclaim()
[all …]
hugetlb_cma.c
30 int nid, nodemask_t *nodemask) in hugetlb_cma_alloc_folio() argument
39 for_each_node_mask(node, *nodemask) { in hugetlb_cma_alloc_folio()
hugetlb.c
1388 nodemask_t *nodemask; in dequeue_hugetlb_folio_vma() local
1399 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_hugetlb_folio_vma()
1403 nid, nodemask); in dequeue_hugetlb_folio_vma()
1406 nodemask = NULL; in dequeue_hugetlb_folio_vma()
1411 nid, nodemask); in dequeue_hugetlb_folio_vma()
1423 int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
1429 folio = hugetlb_cma_alloc_folio(order, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
1434 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
1453 nodemask_t *nodemask) in alloc_gigantic_folio() argument
1461 nodemask_t *nodemask) in alloc_gigantic_folio() argument
[all …]
numa_memblks.c
23 static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, in numa_nodemask_from_meminfo() argument
31 node_set(mi->blk[i].nid, *nodemask); in numa_nodemask_from_meminfo()
vmscan.c
83 nodemask_t *nodemask; member
6243 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6380 sc->nodemask) { in do_try_to_free_pages()
6494 nodemask_t *nodemask) in throttle_direct_reclaim() argument
6532 gfp_zone(gfp_mask), nodemask) { in throttle_direct_reclaim()
6574 gfp_t gfp_mask, nodemask_t *nodemask) in try_to_free_pages() argument
6582 .nodemask = nodemask, in try_to_free_pages()
6602 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) in try_to_free_pages()
compaction.c
2443 ac->highest_zoneidx, ac->nodemask) { in compaction_zonelist_suitable()
2829 ac->highest_zoneidx, ac->nodemask) { in try_to_compact_pages()
/linux/tools/testing/selftests/kvm/x86/
xapic_ipi_test.c
254 unsigned long nodemask = 0; in do_migrations() local
255 unsigned long nodemasks[sizeof(nodemask) * 8]; in do_migrations()
270 kvm_get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8, in do_migrations()
275 sizeof(nodemask) * 8, nodemask); in do_migrations()
281 for (i = 0, bit = 1; i < sizeof(nodemask) * 8; i++, bit <<= 1) { in do_migrations()
282 if (nodemask & bit) { in do_migrations()
283 nodemasks[nodes] = nodemask & bit; in do_migrations()
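
The xapic_ipi_test.c hits split the mask returned by get_mempolicy() into one single-bit mask per allowed node. A hedged userspace sketch of the same decomposition, using the get_mempolicy(2) wrapper from libnuma's numaif.h rather than the selftest's kvm_get_mempolicy helper; it assumes node ids fit in one unsigned long:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = 0;
	unsigned long nodemasks[sizeof(nodemask) * 8];
	unsigned long bit;
	int i, nodes = 0;

	/* MPOL_F_MEMS_ALLOWED fetches the set of nodes this thread may use. */
	if (get_mempolicy(NULL, &nodemask, sizeof(nodemask) * 8, NULL,
			  MPOL_F_MEMS_ALLOWED))
		return 1;

	/* One single-node mask per set bit, as in the hits at 281-283. */
	for (i = 0, bit = 1; i < (int)(sizeof(nodemask) * 8); i++, bit <<= 1)
		if (nodemask & bit)
			nodemasks[nodes++] = nodemask & bit;

	printf("%d allowed node(s)\n", nodes);
	return 0;
}

Build with -lnuma.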
/linux/include/linux/
cpuset.h
84 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
164 static inline void set_mems_allowed(nodemask_t nodemask) in set_mems_allowed() argument
171 current->mems_allowed = nodemask; in set_mems_allowed()
229 static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) in cpuset_nodemask_valid_mems_allowed() argument
290 static inline void set_mems_allowed(nodemask_t nodemask) in set_mems_allowed() argument
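
A hedged kernel-style sketch of the two cpuset.h helpers above; adopt_nodes is a hypothetical wrapper, added only to show the calling convention:

#include <linux/cpuset.h>
#include <linux/nodemask.h>

static void adopt_nodes(nodemask_t mask)
{
	/* Non-zero when some node in the mask is in current->mems_allowed. */
	if (cpuset_nodemask_valid_mems_allowed(&mask))
		set_mems_allowed(mask);	/* replaces current->mems_allowed */
}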
nodemask.h
149 #define node_isset(node, nodemask) test_bit((node), (nodemask).bits) argument
151 #define node_test_and_set(node, nodemask) \ argument
152 __node_test_and_set((node), &(nodemask))
235 #define nodes_full(nodemask) __nodes_full(&(nodemask), MAX_NUMNODES) argument
241 #define nodes_weight(nodemask) __nodes_weight(&(nodemask), MAX_NUMNODES) argument
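
The nodemask.h hits are the bitmap primitives the other results build on. A small kernel-style sketch of their use; sketch_nodemask_ops is a made-up name:

#include <linux/nodemask.h>
#include <linux/printk.h>

static int sketch_nodemask_ops(int nid)
{
	nodemask_t mask = NODE_MASK_NONE;

	node_set(nid, mask);			/* set one node bit */
	if (node_isset(nid, mask))		/* membership test (hit 149) */
		pr_debug("node %d is set\n", nid);
	if (node_test_and_set(nid, mask))	/* test-and-set (hits 151-152) */
		pr_debug("node %d was already set\n", nid);
	return nodes_weight(mask);		/* population count (hit 241) */
}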
mempolicy.h
144 struct mempolicy **mpol, nodemask_t **nodemask);
260 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
263 *nodemask = NULL; in huge_node()
gfp.h
226 nodemask_t *nodemask);
230 nodemask_t *nodemask);
234 nodemask_t *nodemask, int nr_pages,
438 int nid, nodemask_t *nodemask);
oom.h
33 nodemask_t *nodemask; member
mmzone.h
1787 #define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \ argument
1788 for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
1790 z = next_zones_zonelist(++z, highidx, nodemask), \
1793 #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ argument
1796 z = next_zones_zonelist(++z, highidx, nodemask), \
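
for_each_zone_zonelist_nodemask() is the iterator behind most of the mm/ hits above (get_page_from_freelist, shrink_zones, try_to_compact_pages, ...). An illustrative kernel-style sketch; count_zones is a made-up name:

#include <linux/mmzone.h>
#include <linux/nodemask.h>

/* Count the zones at or below highidx whose node is set in the mask. */
static unsigned long count_zones(struct zonelist *zonelist,
				 enum zone_type highidx, nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long n = 0;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, highidx, nodemask)
		n++;
	return n;
}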
/linux/tools/testing/selftests/kvm/
guest_memfd_test.c
82 unsigned long nodemask = 0; in test_mbind() local
95 kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR); in test_mbind()
96 TEST_ASSERT(policy == MPOL_INTERLEAVE && nodemask == nodemask_0, in test_mbind()
98 MPOL_INTERLEAVE, nodemask_0, policy, nodemask); in test_mbind()
102 kvm_get_mempolicy(&policy, &nodemask, maxnode, mem + page_size * 2, MPOL_F_ADDR); in test_mbind()
103 TEST_ASSERT(policy == MPOL_BIND && nodemask == nodemask_0, in test_mbind()
105 MPOL_BIND, nodemask_0, policy, nodemask); in test_mbind()
109 kvm_get_mempolicy(&policy, &nodemask, maxnode, mem, MPOL_F_ADDR); in test_mbind()
110 TEST_ASSERT(policy == MPOL_DEFAULT && !nodemask, in test_mbind()
112 MPOL_DEFAULT, policy, nodemask); in test_mbind()
/linux/tools/testing/selftests/futex/functional/
futex_numa_mpol.c
183 unsigned long nodemask; in TEST() local
186 nodemask = 1 << i; in TEST()
187 ret = mbind(futex_ptr, mem_size, MPOL_BIND, &nodemask, in TEST()
188 sizeof(nodemask) * 8, 0); in TEST()
/linux/Documentation/admin-guide/mm/
numa_memory_policy.rst
215 is always preferred by passing an empty nodemask with this
216 mode. If an empty nodemask is passed, the policy cannot use
248 satisfied from the nodemask specified in the policy. If there is
249 a memory pressure on all nodes in the nodemask, the allocation
265 This flag specifies that the nodemask passed by
270 change in the set of allowed nodes, the preferred nodemask (Preferred
271 Many), preferred node (Preferred) or nodemask (Bind, Interleave) is
284 3 is allowed from the user's nodemask, the "interleave" only
285 occurs over that node. If no nodes from the user's nodemask are
290 MPOL_PREFERRED policies that were created with an empty nodemask
[all …]
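
The numa_memory_policy.rst excerpt describes how each policy mode interprets its nodemask. A hedged userspace illustration via set_mempolicy(2) from libnuma's numaif.h; the node numbers are hypothetical, and maxnode follows the sizeof(nodemask) * 8 convention the selftest hits use:

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical: restrict this thread's allocations to nodes 0 and 2. */
	unsigned long nodemask = (1UL << 0) | (1UL << 2);

	if (set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask) * 8))
		perror("MPOL_BIND");

	/* An empty/NULL nodemask with MPOL_PREFERRED selects "local
	 * allocation", per the excerpt above. */
	if (set_mempolicy(MPOL_PREFERRED, NULL, 0))
		perror("MPOL_PREFERRED");
	return 0;
}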
/linux/tools/testing/selftests/kvm/include/
numaif.h
30 const unsigned long *, nodemask, unsigned long, maxnode,
/linux/Documentation/translations/zh_CN/core-api/
printk-formats.rst
532 bitmap and its derivatives such as cpumask and nodemask
540 For printing a bitmap and its derivatives such as cpumask and nodemask, %*pb outputs the bitmap with the field width as the number of bits,
544 nodemask_pr_args() for convenient printing of cpumask and nodemask
/linux/tools/workqueue/
wq_dump.py
53 from drgn.helpers.linux.nodemask import for_each_node
/linux/Documentation/core-api/
printk-formats.rst
587 bitmap and its derivatives such as cpumask and nodemask
595 For printing bitmap and its derivatives such as cpumask and nodemask,
601 printing cpumask and nodemask.
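
Both printk-formats.rst excerpts (the zh_CN translation above and this one) document the %*pb/%*pbl specifiers. A kernel-style sketch using the helper the hits name; print_memory_nodes is a made-up function:

#include <linux/nodemask.h>
#include <linux/printk.h>

static void print_memory_nodes(void)
{
	/* %*pbl prints a ranged node list such as "0,2-3";
	 * nodemask_pr_args() supplies the width/buffer pair it expects. */
	pr_info("nodes with memory: %*pbl\n",
		nodemask_pr_args(&node_states[N_MEMORY]));
}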
/linux/kernel/irq/
manage.c
602 const struct cpumask *nodemask = cpumask_of_node(node); in irq_setup_affinity() local
605 if (cpumask_intersects(&mask, nodemask)) in irq_setup_affinity()
606 cpumask_and(&mask, &mask, nodemask); in irq_setup_affinity()
