Searched refs:nodemask (Results 1 – 22 of 22) sorted by relevance

/linux/mm/
show_mem.c
119 static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) in show_mem_node_skip() argument
129 if (!nodemask) in show_mem_node_skip()
130 nodemask = &cpuset_current_mems_allowed; in show_mem_node_skip()
132 return !node_isset(nid, *nodemask); in show_mem_node_skip()
180 static void show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) in show_free_areas() argument
190 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
227 if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) in show_free_areas()
290 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
355 if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) in show_free_areas()
385 if (show_mem_node_skip(filter, nid, nodemask)) in show_free_areas()
397 __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) in __show_mem() argument
[all...]
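
The show_mem_node_skip() hits above reduce to one pattern: if the caller passed no nodemask, fall back to the current task's cpuset-allowed nodes, then test the node with node_isset(). A minimal kernel-style sketch of that pattern (not the kernel's exact code):

#include <linux/nodemask.h>
#include <linux/cpuset.h>

/* Sketch only: mirror the fallback above, then test whether @nid is set. */
static bool sketch_node_allowed(int nid, nodemask_t *nodemask)
{
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return node_isset(nid, *nodemask);
}
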
mempolicy.c
1373 nodemask_t *nodemask; in alloc_migration_target_by_mpol() local
1378 nodemask = policy_nodemask(gfp, pol, ilx, &nid); in alloc_migration_target_by_mpol()
1379 return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp, in alloc_migration_target_by_mpol()
2132 nodemask_t nodemask; in weighted_interleave_nid() local
2139 nr_nodes = read_once_policy_nodemask(pol, &nodemask); in weighted_interleave_nid()
2151 for_each_node_mask(nid, nodemask) in weighted_interleave_nid()
2156 nid = first_node(nodemask); in weighted_interleave_nid()
2163 nid = next_node_in(nid, nodemask); in weighted_interleave_nid()
2176 nodemask_t nodemask; in interleave_nid() local
2181 nnodes = read_once_policy_nodemask(pol, &nodemask); in interleave_nid()
[all …]
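
The interleave helpers above walk a policy nodemask with first_node()/next_node_in(). A minimal sketch of selecting the N-th set node from a nodemask, assuming only the standard <linux/nodemask.h> helpers (not the kernel's exact interleave code):

#include <linux/nodemask.h>
#include <linux/numa.h>

/* Sketch only: return the @target-th set node in @mask, wrapping with
 * next_node_in() the way the interleave helpers above do. */
static int sketch_pick_nth_node(const nodemask_t *mask, unsigned int target)
{
	unsigned int nnodes = nodes_weight(*mask);
	int nid;

	if (!nnodes)
		return NUMA_NO_NODE;

	target %= nnodes;
	nid = first_node(*mask);
	while (target--)
		nid = next_node_in(nid, *mask);

	return nid;
}
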
page_alloc.c
37 #include <linux/nodemask.h>
3419 ac->nodemask) { in zone_watermark_fast()
3736 ac->nodemask) { in warn_alloc()
3892 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in __alloc_pages_direct_compact()
3908 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); in __alloc_pages_direct_compact()
3911 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in __alloc_pages_direct_compact()
3925 pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl", in should_compact_retry()
3927 nodemask_pr_args(nodemask)); in should_compact_retry()
3933 warn_alloc_show_mem(gfp_mask, nodemask); in should_compact_retry()
3961 .nodemask in should_compact_retry()
3703 warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
3722 warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
4706 prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid, nodemask_t *nodemask, struct alloc_context *ac, gfp_t *alloc_gfp, unsigned int *alloc_flags) in prepare_alloc_pages() argument
4770 alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct page **page_array) in alloc_pages_bulk_noprof() argument
4924 __alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) in __alloc_frozen_pages_noprof() argument
4989 __alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) in __alloc_pages_noprof() argument
5001 __folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) in __folio_alloc_noprof() argument
6870 alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, int nid, nodemask_t *nodemask) in alloc_contig_pages_noprof() argument
[all...]
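
page_alloc.c threads the caller's nodemask from the __alloc_pages*() entry points down through prepare_alloc_pages() and, on failure, warn_alloc(). A minimal sketch of allocating a page restricted to an explicit nodemask, assuming the __alloc_pages() wrapper around __alloc_pages_noprof() shown above:

#include <linux/gfp.h>
#include <linux/nodemask.h>

/* Sketch only: allocate one page constrained to nodes 0 and 1; the caller
 * releases it with __free_pages(page, 0). */
static struct page *sketch_alloc_on_nodes_0_1(gfp_t gfp)
{
	nodemask_t allowed = NODE_MASK_NONE;

	node_set(0, allowed);
	node_set(1, allowed);

	return __alloc_pages(gfp, 0, first_node(allowed), &allowed);
}
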
hugetlb_cma.c
30 int nid, nodemask_t *nodemask) in hugetlb_cma_alloc_folio() argument
40 for_each_node_mask(node, *nodemask) { in hugetlb_cma_alloc_folio()
numa_memblks.c
21 static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask, in numa_nodemask_from_meminfo() argument
29 node_set(mi->blk[i].nid, *nodemask); in numa_nodemask_from_meminfo()
hugetlb.c
1371 nodemask_t *nodemask; in dequeue_hugetlb_folio_vma() local
1382 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_hugetlb_folio_vma()
1386 nid, nodemask); in dequeue_hugetlb_folio_vma()
1389 nodemask = NULL; in dequeue_hugetlb_folio_vma()
1394 nid, nodemask); in dequeue_hugetlb_folio_vma()
1477 int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
1486 folio = hugetlb_cma_alloc_folio(h, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
1491 folio = folio_alloc_gigantic(order, gfp_mask, nid, nodemask); in alloc_gigantic_folio()
1510 int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
1518 int nid, nodemask_t *nodemask) in alloc_gigantic_folio() argument
[all …]
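
dequeue_hugetlb_folio_vma() above resolves its nid/nodemask pair from the VMA's mempolicy via huge_node() and drops the policy reference afterwards. A rough sketch of that sequence; alloc_one_hugetlb_folio() is a hypothetical stand-in for the actual dequeue/allocation step, not a kernel function:

#include <linux/mempolicy.h>
#include <linux/hugetlb.h>

/* Sketch only: pick nid/nodemask from the VMA's policy, allocate, then
 * release the mempolicy reference. */
static struct folio *sketch_hugetlb_alloc_for_vma(struct hstate *h,
						  struct vm_area_struct *vma,
						  unsigned long address,
						  gfp_t gfp_mask)
{
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct folio *folio;
	int nid;

	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
	folio = alloc_one_hugetlb_folio(h, gfp_mask, nid, nodemask); /* hypothetical */
	mpol_cond_put(mpol);

	return folio;
}
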
vmscan.c
83 nodemask_t *nodemask; member
6279 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6416 sc->nodemask) { in do_try_to_free_pages()
6530 nodemask_t *nodemask) in throttle_direct_reclaim() argument
6568 gfp_zone(gfp_mask), nodemask) { in throttle_direct_reclaim()
6610 gfp_t gfp_mask, nodemask_t *nodemask) in try_to_free_pages() argument
6618 .nodemask = nodemask, in try_to_free_pages()
6638 if (throttle_direct_reclaim(sc.gfp_mask, zonelist, nodemask)) in try_to_free_pages()
compaction.c
2443 ac->highest_zoneidx, ac->nodemask) { in compaction_suitable()
2829 ac->highest_zoneidx, ac->nodemask) { in compact_zone_order()
internal.h
590 nodemask_t *nodemask; member
memcontrol.c
1632 .nodemask = NULL, in mem_cgroup_out_of_memory()
/linux/include/linux/
cpuset.h
83 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
163 static inline void set_mems_allowed(nodemask_t nodemask) in set_mems_allowed() argument
170 current->mems_allowed = nodemask; in set_mems_allowed()
222 static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) in cpuset_nodemask_valid_mems_allowed() argument
283 static inline void set_mems_allowed(nodemask_t nodemask) in set_mems_allowed() argument
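
cpuset.h exposes cpuset_nodemask_valid_mems_allowed() so callers can reject a requested nodemask that has no overlap with the task's cpuset mems_allowed (see the cpuset.c hit further below). A minimal sketch of such a check:

#include <linux/cpuset.h>
#include <linux/errno.h>
#include <linux/nodemask.h>

/* Sketch only: refuse a caller-supplied nodemask disjoint from the current
 * task's cpuset-allowed nodes. */
static int sketch_check_requested_nodes(nodemask_t *requested)
{
	if (!cpuset_nodemask_valid_mems_allowed(requested))
		return -EINVAL;

	return 0;
}
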
mempolicy.h
144 struct mempolicy **mpol, nodemask_t **nodemask);
260 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
263 *nodemask = NULL; in huge_node()
mm.h
3242 extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3252 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
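
mm.h declares warn_alloc() with the nodemask as its second argument, which is what ends up printed via %*pbl in the page_alloc.c hit above. A minimal kernel-style sketch of reporting a failed allocation this way:

#include <linux/mm.h>
#include <linux/gfp.h>

/* Sketch only: warn_alloc() prints the gfp mode and the nodemask together
 * with the formatted message. */
static void sketch_report_alloc_failure(gfp_t gfp_mask, nodemask_t *nodemask,
					unsigned int order)
{
	warn_alloc(gfp_mask, nodemask, "order-%u allocation failed", order);
}
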
/linux/tools/testing/selftests/futex/functional/
futex_numa_mpol.c
206 unsigned long nodemask; in main() local
209 nodemask = 1 << i; in main()
210 ret = mbind(futex_ptr, mem_size, MPOL_BIND, &nodemask, in main()
211 sizeof(nodemask) * 8, 0); in main()
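
The selftest above builds the nodemask as a plain bit mask in userspace and passes it to mbind() with the mask size in bits. A standalone userspace sketch of the same call, binding an anonymous mapping to node 0 (requires the libnuma headers for <numaif.h>; link with -lnuma):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0)) {
		perror("mbind");
		return EXIT_FAILURE;
	}
	printf("mapping bound to node 0\n");
	return EXIT_SUCCESS;
}
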
/linux/Documentation/admin-guide/mm/
numa_memory_policy.rst
215 is always preferred by passing an empty nodemask with this
216 mode. If an empty nodemask is passed, the policy cannot use
248 satisfied from the nodemask specified in the policy. If there is
249 a memory pressure on all nodes in the nodemask, the allocation
265 This flag specifies that the nodemask passed by
270 change in the set of allowed nodes, the preferred nodemask (Preferred
271 Many), preferred node (Preferred) or nodemask (Bind, Interleave) is
284 3 is allowed from the user's nodemask, the "interleave" only
285 occurs over that node. If no nodes from the user's nodemask are
290 MPOL_PREFERRED policies that were created with an empty nodemask
[all …]
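
The policy modes described above are installed from userspace with a nodemask as well. A minimal userspace sketch using set_mempolicy() to interleave the calling task's future allocations across nodes 0 and 1 (link with -lnuma; fails with EINVAL on a system without node 1):

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = (1UL << 0) | (1UL << 1);

	if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8)) {
		perror("set_mempolicy");
		return 1;
	}
	return 0;
}
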
/linux/Documentation/translations/zh_CN/core-api/
printk-formats.rst
532 Bitmaps and their derivatives, such as cpumask and nodemask
540 For printing a bitmap and its derived cpumask and nodemask, %*pb outputs the bitmap with the field width taken as the number of bits,
544 nodemask_pr_args() to conveniently print a cpumask and nodemask
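
As the printk documentation above notes, a nodemask is printed with %*pb or %*pbl plus nodemask_pr_args(). A minimal kernel-style sketch:

#include <linux/nodemask.h>
#include <linux/printk.h>

/* Sketch only: print the nodes set in @mask as a ranged list, e.g. "0-1,3". */
static void sketch_print_nodemask(const nodemask_t *mask)
{
	pr_info("allowed nodes: %*pbl\n", nodemask_pr_args(mask));
}
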
/linux/tools/workqueue/
wq_dump.py
53 from drgn.helpers.linux.nodemask import for_each_node
/linux/kernel/irq/
manage.c
602 const struct cpumask *nodemask = cpumask_of_node(node); in irq_setup_affinity() local
605 if (cpumask_intersects(&mask, nodemask)) in irq_setup_affinity()
606 cpumask_and(&mask, &mask, nodemask); in irq_setup_affinity()
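
Here "nodemask" names a cpumask covering one node's CPUs: irq_setup_affinity() narrows the affinity mask to that node's CPUs when they intersect. A minimal sketch of that cpumask pattern (the helper name is hypothetical):

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Sketch only: restrict @mask to the CPUs of @node if they overlap,
 * otherwise leave it untouched. */
static void sketch_prefer_node_cpus(struct cpumask *mask, int node)
{
	const struct cpumask *nodemask = cpumask_of_node(node);

	if (cpumask_intersects(mask, nodemask))
		cpumask_and(mask, mask, nodemask);
}
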
/linux/drivers/tty/
sysrq.c
390 .nodemask = NULL, in moom_callback()
/linux/kernel/cgroup/
cpuset.c
4185 int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) in cpuset_nodemask_valid_mems_allowed() argument
4187 return nodes_intersects(*nodemask, current->mems_allowed); in cpuset_nodemask_valid_mems_allowed()
/linux/kernel/sched/
core.c
3530 const struct cpumask *nodemask = NULL; in select_fallback_rq() local
3540 nodemask = cpumask_of_node(nid); in select_fallback_rq()
3543 for_each_cpu(dest_cpu, nodemask) { in select_fallback_rq()
/linux/
MAINTAINERS
4278 F: include/linux/nodemask.h