/linux/Documentation/translations/zh_CN/filesystems/
tmpfs.rst

     76  mpol=default              use the process allocation policy
     78  mpol=prefer:Node          prefers to allocate memory from the given Node
     79  mpol=bind:NodeList        allocates memory only from nodes in NodeList
     80  mpol=interleave           prefers to allocate from each node in turn
     81  mpol=interleave:NodeList  allocates from each node of NodeList in turn
     82  mpol=local                prefers to allocate memory from the local node
     86  … expressed as decimal numbers with separators. For example: mpol=bind:0-3,5,7,9-15
    104  For example, mpol=bind=static:NodeList is equivalent to an allocation policy of MPOL_BIND|MPOL_F_STATIC_NODES
    106  Note that mounting a tmpfs with the mpol option will fail if the kernel does not support NUMA; it will also fail if the nodelist specifies a node that is not …
    108  … recovery kernel), or has fewer nodes online, it is advisable to omit the mpol option from automatic mount options.
    [all …]
/linux/Documentation/translations/zh_TW/filesystems/
tmpfs.rst

     76  mpol=default              use the process allocation policy
     78  mpol=prefer:Node          prefers to allocate memory from the given Node
     79  mpol=bind:NodeList        allocates memory only from nodes in NodeList
     80  mpol=interleave           prefers to allocate from each node in turn
     81  mpol=interleave:NodeList  allocates from each node of NodeList in turn
     82  mpol=local                prefers to allocate memory from the local node
     86  … expressed as decimal numbers with separators. For example: mpol=bind:0-3,5,7,9-15
    104  For example, mpol=bind=static:NodeList is equivalent to an allocation policy of MPOL_BIND|MPOL_F_STATIC_NODES
    106  Note that mounting a tmpfs with the mpol option will fail if the kernel does not support NUMA; it will also fail if the nodelist specifies a node that is not …
    108  … recovery kernel), or has fewer nodes online, it is advisable to omit the mpol option from automatic mount options.
    [all …]
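The mpol= string described in both documents above is consumed at mount time.
Below is a minimal user-space sketch of supplying it through mount(2); the
mount point /mnt/ishm, the 1g size, and the node list 0-3 are illustrative
assumptions, not values taken from the listing.

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Interleave tmpfs pages across NUMA nodes 0-3. As lines 106-108
             * above warn, this fails on a kernel built without NUMA or when a
             * listed node is offline, so keep mpol= out of automatic mounts
             * that a recovery kernel may need. */
            if (mount("tmpfs", "/mnt/ishm", "tmpfs", 0,
                      "size=1g,mpol=interleave:0-3") != 0) {
                    perror("mount tmpfs");
                    return 1;
            }
            return 0;
    }

Running it requires CAP_SYS_ADMIN and an existing /mnt/ishm directory.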
/linux/mm/
swap_state.c

    433  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,   in __read_swap_cache_async() argument
    477  new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());   in __read_swap_cache_async()
    563  struct mempolicy *mpol;   in read_swap_cache_async() local
    567  mpol = get_vma_policy(vma, addr, 0, &ilx);   in read_swap_cache_async()
    568  folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,   in read_swap_cache_async()
    570  mpol_cond_put(mpol);   in read_swap_cache_async()
    642  * @mpol: NUMA memory allocation policy to be applied
    657  struct mempolicy *mpol, pgoff_t ilx)   in swap_cluster_readahead() argument
    686  gfp_mask, mpol, ilx, &page_allocated, false);   in swap_cluster_readahead()
    703  folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx, …   in swap_cluster_readahead()
    800  swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)   in swap_vma_readahead() argument
    875  struct mempolicy *mpol;   in swapin_readahead() local
    [all …]
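Read together, the read_swap_cache_async() hits at lines 563-570 above show the
recurring pattern around this file's mpol parameters: resolve the VMA's
mempolicy and interleave index once, thread both through the swap-cache
allocation, then drop the reference that get_vma_policy() may have taken on a
shared policy. A condensed, kernel-internal sketch of that pattern (error
handling and readahead plugging elided; the function name is made up):

    #include "swap.h"   /* mm-internal; declares __read_swap_cache_async() */

    static struct folio *swapin_folio_sketch(swp_entry_t entry, gfp_t gfp_mask,
                                             struct vm_area_struct *vma,
                                             unsigned long addr)
    {
            struct mempolicy *mpol;
            struct folio *folio;
            bool page_allocated;
            pgoff_t ilx;

            /* Takes a ref only on shared policies, hence mpol_cond_put(). */
            mpol = get_vma_policy(vma, addr, 0, &ilx);
            folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
                                            &page_allocated, false);
            mpol_cond_put(mpol);
            return folio;
    }

The zswap.c hit further down follows the same shape, but with no faulting VMA
available at writeback time it substitutes get_task_policy(current).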
swap.h

     72  struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
     75  struct mempolicy *mpol, pgoff_t ilx);
    133  gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)   in swap_cluster_readahead() argument
shmem.c

     117  struct mempolicy *mpol;   member
    1593  static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)   in shmem_show_mpol() argument
    1597  if (!mpol || mpol->mode == MPOL_DEFAULT)   in shmem_show_mpol()
    1600  mpol_to_str(buffer, sizeof(buffer), mpol);   in shmem_show_mpol()
    1607  struct mempolicy *mpol = NULL;   in shmem_get_sbmpol() local
    1608  if (sbinfo->mpol) {   in shmem_get_sbmpol()
    1610  mpol = sbinfo->mpol;   in shmem_get_sbmpol()
    1611  mpol_get(mpol);   in shmem_get_sbmpol()
    1614  return mpol;   in shmem_get_sbmpol()
    1617  static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)   in shmem_show_mpol() argument
    [all …]
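The shmem_get_sbmpol() hits at lines 1607-1614 show how the per-superblock
default policy (the mpol member at line 117) is handed out: take an extra
reference while holding the superblock's stat lock, so a concurrent remount
cannot free the policy out from under the caller. A sketch follows; the
raw_spin_lock calls are an assumption from context, since the locking lines
themselves are not among the matches above.

    #include <linux/shmem_fs.h>     /* struct shmem_sb_info */

    static struct mempolicy *shmem_get_sbmpol_sketch(struct shmem_sb_info *sbinfo)
    {
            struct mempolicy *mpol = NULL;

            if (sbinfo->mpol) {
                    /* stat_lock serializes against remount replacing ->mpol
                     * (assumed lock, see lead-in) */
                    raw_spin_lock(&sbinfo->stat_lock);
                    mpol = sbinfo->mpol;
                    mpol_get(mpol);         /* caller drops with mpol_put() */
                    raw_spin_unlock(&sbinfo->stat_lock);
            }
            return mpol;
    }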
mempolicy.c

    2108  struct mempolicy **mpol, nodemask_t **nodemask)   in huge_node() argument
    2114  *mpol = get_vma_policy(vma, addr, hstate_vma(vma)->order, &ilx);   in huge_node()
    2115  *nodemask = policy_nodemask(gfp_flags, *mpol, ilx, &nid);   in huge_node()
    2970  void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)   in mpol_shared_policy_init() argument
    2977  if (mpol) {   in mpol_shared_policy_init()
    2986  npol = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);   in mpol_shared_policy_init()
    2991  ret = mpol_set_nodemask(npol, &mpol->w.user_nodemask, scratch);   in mpol_shared_policy_init()
    3005  mpol_put(mpol);  /* drop our incoming ref on sb mpol */   in mpol_shared_policy_init()
    3176  int mpol_parse_str(char *str, struct mempolicy **mpol)   in mpol_parse_str() argument
    3296  *mpol = new;   in mpol_parse_str()
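mpol_parse_str() at line 3176 is the parser behind the tmpfs mpol= strings
documented at the top of this listing: it turns a string such as
"bind=static:0-3" into a struct mempolicy stored through *mpol, returning
non-zero on failure. A hedged sketch of a caller; parse_mount_policy is a
made-up name, and the kstrdup() is needed because the parser tokenizes the
string in place.

    static struct mempolicy *parse_mount_policy(const char *opt)
    {
            struct mempolicy *mpol = NULL;
            char *str = kstrdup(opt, GFP_KERNEL);   /* parser modifies str */

            if (!str)
                    return NULL;
            if (mpol_parse_str(str, &mpol))
                    mpol = NULL;    /* defensive; *mpol is set only on success */
            kfree(str);
            return mpol;
    }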
hugetlb.c

    1401  struct mempolicy *mpol;   in dequeue_hugetlb_folio_vma() local
    1419  nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);   in dequeue_hugetlb_folio_vma()
    1421  if (mpol_is_preferred_many(mpol)) {   in dequeue_hugetlb_folio_vma()
    1438  mpol_cond_put(mpol);   in dequeue_hugetlb_folio_vma()
    2373  struct mempolicy *mpol;   in alloc_buddy_hugetlb_folio_with_mpol() local
    2378  nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);   in alloc_buddy_hugetlb_folio_with_mpol()
    2379  if (mpol_is_preferred_many(mpol)) {   in alloc_buddy_hugetlb_folio_with_mpol()
    2390  mpol_cond_put(mpol);   in alloc_buddy_hugetlb_folio_with_mpol()
    2438  struct mempolicy *mpol = get_task_policy(current);   in policy_mbind_nodemask() local
    2444  if (mpol->mode == MPOL_BIND &&   in policy_mbind_nodemask()
    [all …]
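The alloc_buddy_hugetlb_folio_with_mpol() hits (lines 2373-2390) show the
idiom that huge_node() from mempolicy.c feeds: for an MPOL_PREFERRED_MANY
policy, try the preferred nodemask first without direct reclaim, then retry
with the nodemask dropped. A condensed sketch; alloc_one_folio() is a
hypothetical stand-in for the hugetlb allocator called on the lines elided
from the matches, and the exact gfp adjustments are assumptions from context.

    static struct folio *alloc_with_mpol_sketch(struct hstate *h, gfp_t gfp_mask,
                                                struct vm_area_struct *vma,
                                                unsigned long addr)
    {
            struct folio *folio = NULL;
            struct mempolicy *mpol;
            nodemask_t *nodemask;
            int nid;

            nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
            if (mpol_is_preferred_many(mpol)) {
                    /* First pass: preferred nodes only, no reclaim effort. */
                    gfp_t gfp = gfp_mask | __GFP_NOWARN;

                    gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
                    folio = alloc_one_folio(h, gfp, nid, nodemask); /* hypothetical */

                    /* Second pass below may fall back to any node. */
                    nodemask = NULL;
            }
            if (!folio)
                    folio = alloc_one_folio(h, gfp_mask, nid, nodemask); /* hypothetical */
            mpol_cond_put(mpol);
            return folio;
    }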
zswap.c

    1029  struct mempolicy *mpol;   in zswap_writeback_entry() local
    1036  mpol = get_task_policy(current);   in zswap_writeback_entry()
    1037  folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,   in zswap_writeback_entry()
slub.c

    3975  struct mempolicy *mpol = current->mempolicy;   in __slab_alloc_node() local
    3977  if (mpol) {   in __slab_alloc_node()
    3985  if (mpol->mode != MPOL_BIND || !slab ||   in __slab_alloc_node()
    3986  !node_isset(slab_nid(slab), mpol->nodes))   in __slab_alloc_node()
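The __slab_alloc_node() hits show SLUB's shortcut for honoring a hard
MPOL_BIND on its allocation path: when the task has a mempolicy, a candidate
slab is kept only if the policy is a bind whose nodemask already includes the
slab's node; anything else redirects to the node the policy selects. A sketch
of that decision; mempolicy_slab_node() is the policy-driven node lookup,
though that call is an assumption, as it is not among the matches above.

    /* struct slab and slab_nid() live in the mm-internal mm/slab.h. */
    static int pick_alloc_node_sketch(struct slab *slab, int node)
    {
            struct mempolicy *mpol = current->mempolicy;

            if (mpol) {
                    /* Keep the current slab only for a bind policy that
                     * already permits the slab's node. */
                    if (mpol->mode != MPOL_BIND || !slab ||
                        !node_isset(slab_nid(slab), mpol->nodes))
                            node = mempolicy_slab_node();
            }
            return node;
    }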
/linux/include/linux/
shmem_fs.h

     71  struct mempolicy *mpol;  /* default memory policy for mappings */   member
/linux/ipc/
shm.c

    566  static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)   in shm_set_policy() argument
    572  err = sfd->vm_ops->set_policy(vma, mpol);   in shm_set_policy()
    580  struct mempolicy *mpol = vma->vm_policy;   in shm_get_policy() local
    583  mpol = sfd->vm_ops->get_policy(vma, addr, ilx);   in shm_get_policy()
    584  return mpol;   in shm_get_policy()
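The shm.c hits complete the picture from the other side of the ipc boundary:
SysV shared memory implements no NUMA policy of its own, its vma operations
simply forward set_policy/get_policy to the backing shmem file's operations,
with vma->vm_policy as the get-side fallback. A sketch condensing lines
566-584; shm_file_data() is the file's private-data accessor in ipc/shm.c,
and the NULL checks on the ops are assumptions from context.

    static int shm_set_policy_sketch(struct vm_area_struct *vma,
                                     struct mempolicy *mpol)
    {
            struct shm_file_data *sfd = shm_file_data(vma->vm_file);
            int err = 0;

            if (sfd->vm_ops->set_policy)
                    err = sfd->vm_ops->set_policy(vma, mpol);  /* shmem's op */
            return err;
    }

    static struct mempolicy *shm_get_policy_sketch(struct vm_area_struct *vma,
                                                   unsigned long addr,
                                                   pgoff_t *ilx)
    {
            struct shm_file_data *sfd = shm_file_data(vma->vm_file);
            struct mempolicy *mpol = vma->vm_policy;

            if (sfd->vm_ops->get_policy)
                    mpol = sfd->vm_ops->get_policy(vma, addr, ilx);
            return mpol;
    }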