Home
last modified time | relevance | path

Searched refs:mpol (Results 1 – 15 of 15) sorted by relevance

/linux/Documentation/translations/zh_CN/filesystems/
H A Dtmpfs.rst76 mpol=default 采用进程分配策略
78 mpol=prefer:Node 倾向从给定的节点分配
79 mpol=bind:NodeList 只允许从指定的链表分配
80 mpol=interleave 倾向于依次从每个节点分配
81 mpol=interleave:NodeList 依次从每个节点分配
82 mpol=local 优先本地节点分配内存
86 分隔符的十进制数来表示。例如,mpol=bind:0-3,5,7,9-15
104 例如,mpol=bind=static:NodeList相当于MPOL_BIND|MPOL_F_STATIC_NODES的分配策略
106 请注意,如果内核不支持NUMA,那么使用mpol选项挂载tmpfs将会失败;nodelist指定不
108 recovery内核),或者具有较少的节点在线,建议从自动模式中省略mpol选项挂载选项。
[all …]
/linux/Documentation/translations/zh_TW/filesystems/
H A Dtmpfs.rst76 mpol=default 採用進程分配策略
78 mpol=prefer:Node 傾向從給定的節點分配
79 mpol=bind:NodeList 只允許從指定的鏈表分配
80 mpol=interleave 傾向於依次從每個節點分配
81 mpol=interleave:NodeList 依次從每個節點分配
82 mpol=local 優先本地節點分配內存
86 分隔符的十進制數來表示。例如,mpol=bind:0-3,5,7,9-15
104 例如,mpol=bind=static:NodeList相當於MPOL_BIND|MPOL_F_STATIC_NODES的分配策略
106 請注意,如果內核不支持NUMA,那麼使用mpol選項掛載tmpfs將會失敗;nodelist指定不
108 recovery內核),或者具有較少的節點在線,建議從自動模式中省略mpol選項掛載選項。
[all …]
/linux/include/linux/
H A Dmempolicy.h124 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
126 struct vm_area_struct *vma, struct mempolicy *mpol);
145 struct mempolicy **mpol, nodemask_t **nodemask);
164 extern int mpol_parse_str(char *str, struct mempolicy **mpol);
215 struct mempolicy *mpol) in mpol_shared_policy_init() argument
261 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
263 *mpol = NULL; in huge_node()
284 static inline int mpol_parse_str(char *str, struct mempolicy **mpol) in mpol_parse_str() argument
H A Dgfp.h325 struct mempolicy *mpol, pgoff_t ilx, int nid);
338 struct mempolicy *mpol, pgoff_t ilx, int nid) in folio_alloc_mpol_noprof() argument
H A Dshmem_fs.h87 struct mempolicy *mpol; /* default memory policy for mappings */ member
/linux/mm/
H A Dswap_state.c552 struct mempolicy *mpol, pgoff_t ilx, in swap_cache_alloc_folio() argument
570 folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id()); in swap_cache_alloc_folio()
621 struct mempolicy *mpol; in read_swap_cache_async() local
629 mpol = get_vma_policy(vma, addr, 0, &ilx); in read_swap_cache_async()
630 folio = swap_cache_alloc_folio(entry, gfp_mask, mpol, ilx, in read_swap_cache_async()
632 mpol_cond_put(mpol); in read_swap_cache_async()
721 struct mempolicy *mpol, pgoff_t ilx) in swap_cluster_readahead() argument
749 swp_entry(swp_type(entry), offset), gfp_mask, mpol, ilx, in swap_cluster_readahead()
767 folio = swap_cache_alloc_folio(entry, gfp_mask, mpol, ilx, in swap_cluster_readahead()
829 struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf) in swap_vma_readahead() argument
[all …]
H A Dshmem.c116 struct mempolicy *mpol; member
1744 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol) in shmem_show_mpol()
1748 if (!mpol || mpol->mode == MPOL_DEFAULT) in shmem_show_mpol()
1751 mpol_to_str(buffer, sizeof(buffer), mpol); in shmem_show_mpol()
1753 seq_printf(seq, ",mpol=%s", buffer); in shmem_get_sbmpol()
1758 struct mempolicy *mpol = NULL; in shmem_get_sbmpol()
1759 if (sbinfo->mpol) { in shmem_get_sbmpol()
1761 mpol = sbinfo->mpol; in shmem_get_sbmpol()
1741 shmem_show_mpol(struct seq_file * seq,struct mempolicy * mpol) shmem_show_mpol() argument
1755 struct mempolicy *mpol = NULL; shmem_get_sbmpol() local
1765 shmem_show_mpol(struct seq_file * seq,struct mempolicy * mpol) shmem_show_mpol() argument
1780 struct mempolicy *mpol; shmem_swapin_cluster() local
1925 struct mempolicy *mpol; shmem_alloc_folio() local
2888 shmem_set_policy(struct vm_area_struct * vma,struct mempolicy * mpol) shmem_set_policy() argument
2914 struct mempolicy *mpol; shmem_get_pgoff_policy() local
4815 struct mempolicy *mpol = NULL; shmem_reconfigure() local
4910 struct mempolicy *mpol; shmem_show_options() local
[all...]
H A Dswap.h280 struct mempolicy *mpol, pgoff_t ilx,
297 struct mempolicy *mpol, pgoff_t ilx); in swap_zeromap_batch()
422 gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx) in __swap_cache_del_folio()
378 swap_cluster_readahead(swp_entry_t entry,gfp_t gfp_mask,struct mempolicy * mpol,pgoff_t ilx) swap_cluster_readahead() argument
H A Dmempolicy.c2328 struct mempolicy **mpol, nodemask_t **nodemask) in huge_node() argument
2334 *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx); in huge_node()
2335 *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid); in huge_node()
3210 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol) in mpol_shared_policy_init() argument
3217 if (mpol) { in mpol_shared_policy_init()
3226 npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask); in mpol_shared_policy_init()
3231 ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch); in mpol_shared_policy_init()
3245 mpol_put(mpol); /* drop our incoming ref on sb mpol */ in mpol_shared_policy_init()
3419 int mpol_parse_str(char *str, struct mempolicy **mpol) in mpol_parse_str() argument
3539 *mpol = new; in mpol_parse_str()
H A Dhugetlb.c1370 struct mempolicy *mpol; in available_huge_pages()
1383 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_hugetlb_folio_vma()
1385 if (mpol_is_preferred_many(mpol)) { in dequeue_hugetlb_folio_vma()
1397 mpol_cond_put(mpol); in dequeue_hugetlb_folio_vma()
2204 struct mempolicy *mpol;
2209 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in alloc_buddy_hugetlb_folio_with_mpol()
2210 if (mpol_is_preferred_many(mpol)) { in alloc_buddy_hugetlb_folio_with_mpol()
2221 mpol_cond_put(mpol); in alloc_buddy_hugetlb_folio_with_mpol()
2272 struct mempolicy *mpol = get_task_policy(current); in alloc_hugetlb_folio_nodemask()
2278 if (mpol in policy_mbind_nodemask()
1378 struct mempolicy *mpol; dequeue_hugetlb_folio_vma() local
2212 struct mempolicy *mpol; alloc_buddy_hugetlb_folio_with_mpol() local
2280 struct mempolicy *mpol = get_task_policy(current); policy_mbind_nodemask() local
6170 struct mempolicy *mpol; alloc_hugetlb_folio_vma() local
[all...]
H A Dzswap.c1000 struct mempolicy *mpol; in zswap_writeback_entry() local
1010 mpol = get_task_policy(current); in zswap_writeback_entry()
1011 folio = swap_cache_alloc_folio(swpentry, GFP_KERNEL, mpol, in zswap_writeback_entry()
H A Dslub.c4462 struct mempolicy *mpol = current->mempolicy; in __slab_alloc_node() local
4464 if (mpol) { in __slab_alloc_node()
4472 if (mpol->mode != MPOL_BIND || in __slab_alloc_node()
4473 !node_isset(numa_mem_id(), mpol->nodes)) in __slab_alloc_node()
4682 struct mempolicy *mpol = current->mempolicy; in alloc_from_pcs() local
4684 if (mpol) { in alloc_from_pcs()
4692 if (mpol->mode != MPOL_BIND || in alloc_from_pcs()
4693 !node_isset(numa_mem_id(), mpol->nodes)) in alloc_from_pcs()
/linux/kernel/futex/
H A Dcore.c339 struct mempolicy *mpol; in __futex_key_to_node() local
345 mpol = READ_ONCE(vma->vm_policy); in __futex_key_to_node()
346 if (!mpol) in __futex_key_to_node()
349 switch (mpol->mode) { in __futex_key_to_node()
351 node = first_node(mpol->nodes); in __futex_key_to_node()
355 if (mpol->home_node != NUMA_NO_NODE) in __futex_key_to_node()
356 node = mpol->home_node; in __futex_key_to_node()
/linux/ipc/
H A Dshm.c571 static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) in shm_set_policy() argument
577 err = sfd->vm_ops->set_policy(vma, mpol); in shm_set_policy()
585 struct mempolicy *mpol = vma->vm_policy; in shm_get_policy() local
588 mpol = sfd->vm_ops->get_policy(vma, addr, ilx); in shm_get_policy()
589 return mpol; in shm_get_policy()
/linux/virt/kvm/
H A Dguest_memfd.c434 static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol) in kvm_gmem_set_policy()
438 return mpol_set_shared_policy(&GMEM_I(inode)->policy, vma, mpol); in kvm_gmem_set_policy()
433 kvm_gmem_set_policy(struct vm_area_struct * vma,struct mempolicy * mpol) kvm_gmem_set_policy() argument