/linux/include/linux/
mempolicy.h
    47: struct mempolicy {
    65: extern void __mpol_put(struct mempolicy *pol);
    66: static inline void mpol_put(struct mempolicy *pol)
    76: static inline int mpol_needs_cond_ref(struct mempolicy *pol)
    81: static inline void mpol_cond_put(struct mempolicy *pol)
    87: extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
    88: static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
    95: static inline void mpol_get(struct mempolicy *pol)
    101: extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
    102: static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
    [all …]
gfp.h
    14: struct mempolicy;
    321: struct mempolicy *mpol, pgoff_t ilx, int nid);
    334: struct mempolicy *mpol, pgoff_t ilx, int nid)  (in folio_alloc_mpol_noprof())
shmem_fs.h
    87: struct mempolicy *mpol;  /* default memory policy for mappings */
pagemap.h
    655: struct mempolicy *policy);
    658: struct mempolicy *policy)  (in filemap_alloc_folio_noprof())
    756: pgoff_t index, fgf_t fgf_flags, gfp_t gfp, struct mempolicy *policy);
sched.h
    69: struct mempolicy;
    1357: struct mempolicy *mempolicy;
mm.h
    40: struct mempolicy;
    790: int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
    802: struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
/linux/mm/
mempolicy.c
    138: static struct mempolicy default_policy = {
    143: static struct mempolicy preferred_node_policy[MAX_NUMNODES];
    341: struct mempolicy *get_task_policy(struct task_struct *p)
    343: struct mempolicy *pol = p->mempolicy;  (in get_task_policy())
    362: int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
    363: void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
    366: static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
    379: static int mpol_new_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
    387: static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
    405: static int mpol_set_nodemask(struct mempolicy *pol,
    [all …]
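The mm/mempolicy.c hits centre on default_policy and get_task_policy(), the per-task policy that userspace reads back with get_mempolicy(2). A minimal sketch of that userspace side, assuming libnuma's <numaif.h> is available (build with -lnuma); a task that never called set_mempolicy(2) reports MPOL_DEFAULT:

  /*
   * Query the calling thread's NUMA policy, the user-visible counterpart
   * of get_task_policy(). Assumes libnuma's <numaif.h>; link with -lnuma.
   */
  #include <numaif.h>
  #include <stdio.h>

  int main(void)
  {
      int mode;

      /* flags == 0: report the thread's task policy, not a VMA policy */
      if (get_mempolicy(&mode, NULL, 0, NULL, 0) != 0) {
          perror("get_mempolicy");
          return 1;
      }
      printf("task mempolicy mode: %d (MPOL_DEFAULT is %d)\n",
             mode, MPOL_DEFAULT);
      return 0;
  }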
swap.h
    6: struct mempolicy;
    265: struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
    268: struct mempolicy *mpol, pgoff_t ilx);
    378: gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)  (in swap_cluster_readahead())
swap_state.c
    406: struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,  (in __read_swap_cache_async())
    519: struct mempolicy *mpol;  (in read_swap_cache_async())
    619: struct mempolicy *mpol, pgoff_t ilx)  (in swap_cluster_readahead())
    727: struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)  (in swap_vma_readahead())
    814: struct mempolicy *mpol;  (in swapin_readahead())
vma.h
    104: struct mempolicy *policy;
    340: struct mempolicy *new_pol);
shmem.c
    70: #include <linux/mempolicy.h>
    116: struct mempolicy *mpol;
    1701: static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
    1713: static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
    1715: struct mempolicy *mpol = NULL;  (in shmem_get_sbmpol())
    1725: static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
    1728: static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
    1734: static struct mempolicy *shmem_get_pgoff_policy(struct shmem_inode_info *info,
    1740: struct mempolicy *mpol;  (in shmem_swapin_cluster())
    1885: struct mempolicy *mpol  (in shmem_alloc_folio())
    [all …]
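shmem_show_mpol() and shmem_get_sbmpol() back the tmpfs "mpol=" mount option, which stores a per-superblock policy in sbinfo->mpol. A hedged sketch of setting it from userspace; the mount point and size are illustrative (the directory must already exist), and CAP_SYS_ADMIN plus a NUMA-enabled kernel are assumed:

  /*
   * Mount a tmpfs instance with a per-superblock memory policy.
   * "mpol=interleave" with no node list interleaves over all memory nodes.
   * The mount point and size below are only examples; needs CAP_SYS_ADMIN.
   */
  #include <stdio.h>
  #include <sys/mount.h>

  int main(void)
  {
      if (mount("tmpfs", "/mnt/interleaved", "tmpfs", 0,
                "size=64m,mpol=interleave") != 0) {
          perror("mount");
          return 1;
      }
      puts("mounted /mnt/interleaved with mpol=interleave");
      return 0;
  }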
Makefile
    86: obj-$(CONFIG_NUMA) += mempolicy.o
hugetlb.c
    1386: struct mempolicy *mpol;  (in dequeue_hugetlb_folio_vma())
    2239: struct mempolicy *mpol;  (in alloc_buddy_hugetlb_folio_with_mpol())
    2307: struct mempolicy *mpol = get_task_policy(current);  (in policy_mbind_nodemask())
    6165: struct mempolicy *mpol;  (in alloc_hugetlb_folio_vma())
/linux/Documentation/ABI/testing/
sysfs-kernel-mm-mempolicy-weighted-interleave
    1: What: /sys/kernel/mm/mempolicy/weighted_interleave/
    6: What: /sys/kernel/mm/mempolicy/weighted_interleave/nodeN
    12: utilized by tasks which have set their mempolicy to
    29: What: /sys/kernel/mm/mempolicy/weighted_interleave/auto
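The ABI file above describes the per-node weight files used by weighted interleave. A small sketch of adjusting one weight; the node number and the weight value are arbitrary examples, and a kernel that exposes /sys/kernel/mm/mempolicy/weighted_interleave/ plus root privileges are assumed:

  /*
   * Write a relative weight for node0 via the sysfs interface above.
   * Node number and weight are arbitrary examples; requires root and a
   * kernel with weighted-interleave support.
   */
  #include <stdio.h>

  int main(void)
  {
      const char *path = "/sys/kernel/mm/mempolicy/weighted_interleave/node0";
      FILE *f = fopen(path, "w");

      if (!f) {
          perror(path);
          return 1;
      }
      fprintf(f, "4\n");    /* give node0 a relative weight of 4 */
      fclose(f);
      return 0;
  }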
sysfs-kernel-mm-mempolicy
    1: What: /sys/kernel/mm/mempolicy/
/linux/Documentation/driver-api/cxl/allocation/
page-allocator.rst
    15: NUMA nodes and mempolicy
    17: Unless a task explicitly registers a mempolicy, the default memory policy
/linux/tools/testing/vma/
vma_internal.h
    513: struct mempolicy {};
    716: struct mempolicy *vm_policy;  /* NUMA policy for the VMA */
    797: int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
    809: struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
    1106: static inline void mpol_put(struct mempolicy *pol)
    1315: static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
/linux/fs/proc/
internal.h
    19: struct mempolicy;
    398: struct mempolicy *task_mempolicy;
/linux/Documentation/translations/zh_CN/core-api/
mm-api.rst
    117: mm/mempolicy.c
/linux/Documentation/admin-guide/mm/
numa_memory_policy.rst
    170: structure, struct mempolicy. Details of this structure will be
    207: preferred_node member of struct mempolicy. When the internal
    256: /sys/kernel/mm/mempolicy/weighted_interleave/
    269: Without this flag, any time a mempolicy is rebound because of a
    301: mempolicy is rebound because of a change in the set of allowed
    321: if not already set, sets the node in the mempolicy nodemask.
    347: To resolve use/free races, struct mempolicy contains an atomic reference
    350: the structure back to the mempolicy kmem cache when the reference count
    427: definitions are defined in <linux/mempolicy.h>.
    478: mempolicy range. Other address ranges are ignored. A home node is the NUMA node
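numa_memory_policy.rst also covers VMA policies, which userspace installs with mbind(2). A minimal sketch, again assuming libnuma's <numaif.h> and that node 0 exists on the machine:

  /*
   * Install a VMA policy on an anonymous mapping with mbind(2).
   * Assumes libnuma's <numaif.h> (link with -lnuma) and that node 0 exists.
   */
  #include <numaif.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>

  int main(void)
  {
      size_t len = 4 * 1024 * 1024;
      unsigned long nodemask = 1UL << 0;    /* node 0 only */
      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (p == MAP_FAILED) {
          perror("mmap");
          return 1;
      }
      /* bind the range to node 0; pages land there on first touch */
      if (mbind(p, len, MPOL_BIND, &nodemask, sizeof(nodemask) * 8, 0) != 0) {
          perror("mbind");
          return 1;
      }
      memset(p, 0, len);    /* fault the pages in under the new policy */
      return 0;
  }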
/linux/virt/kvm/
guest_memfd.c
    136: struct mempolicy *policy;  (in kvm_gmem_get_folio())
    438: static int kvm_gmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
    445: static struct mempolicy *kvm_gmem_get_policy(struct vm_area_struct *vma,
/linux/Documentation/core-api/
mm-api.rst
    101: .. kernel-doc:: mm/mempolicy.c
/linux/kernel/
fork.c
    2154: p->mempolicy = mpol_dup(p->mempolicy);  (in copy_process())
    2155: if (IS_ERR(p->mempolicy)) {  (in copy_process())
    2156: retval = PTR_ERR(p->mempolicy);  (in copy_process())
    2157: p->mempolicy = NULL;  (in copy_process())
    2525: mpol_put(p->mempolicy);  (in copy_process())
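The copy_process() hits show the kernel duplicating the parent's task policy with mpol_dup(), which is why a policy set before fork(2) is inherited by the child. A small userspace sketch of that behaviour, assuming libnuma's <numaif.h> and an existing node 0:

  /*
   * A task policy set before fork() is inherited by the child
   * (copy_process() duplicates it with mpol_dup()).
   * Assumes libnuma's <numaif.h> (link with -lnuma) and node 0.
   */
  #include <numaif.h>
  #include <stdio.h>
  #include <sys/wait.h>
  #include <unistd.h>

  int main(void)
  {
      unsigned long nodemask = 1UL << 0;    /* prefer node 0 */
      int mode = -1;

      if (set_mempolicy(MPOL_PREFERRED, &nodemask, sizeof(nodemask) * 8) != 0) {
          perror("set_mempolicy");
          return 1;
      }
      if (fork() == 0) {
          /* child: the duplicated policy reports the same mode */
          if (get_mempolicy(&mode, NULL, 0, NULL, 0) == 0)
              printf("child mode: %d (MPOL_PREFERRED is %d)\n",
                     mode, MPOL_PREFERRED);
          _exit(0);
      }
      wait(NULL);
      return 0;
  }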
/linux/ipc/
shm.c
    571: static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
    581: static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
    585: struct mempolicy *mpol = vma->vm_policy;  (in shm_get_policy())
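shm_set_policy() and shm_get_policy() are the vm_operations hooks reached when mbind(2) is applied to an attached SysV shared-memory segment. A hedged sketch, assuming libnuma's <numaif.h> and that node 0 exists:

  /*
   * Apply a policy to an attached SysV shared-memory segment; in the
   * kernel this path goes through the shm_set_policy() hook.
   * Assumes libnuma's <numaif.h> (link with -lnuma) and node 0.
   */
  #include <numaif.h>
  #include <stdio.h>
  #include <sys/ipc.h>
  #include <sys/shm.h>

  int main(void)
  {
      size_t len = 1 << 20;                 /* 1 MiB */
      unsigned long nodemask = 1UL << 0;    /* node 0 */
      int id = shmget(IPC_PRIVATE, len, IPC_CREAT | 0600);
      void *p;

      if (id < 0) {
          perror("shmget");
          return 1;
      }
      p = shmat(id, NULL, 0);
      if (p == (void *)-1) {
          perror("shmat");
          return 1;
      }
      /* interleave the segment's pages over the nodes in the mask */
      if (mbind(p, len, MPOL_INTERLEAVE, &nodemask,
                sizeof(nodemask) * 8, 0) != 0)
          perror("mbind");

      shmdt(p);
      shmctl(id, IPC_RMID, NULL);
      return 0;
  }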
/linux/Documentation/admin-guide/cgroup-v1/
cpusets.rst
    342: except perhaps as modified by the task's NUMA mempolicy or cpuset
    349: or slab caches to ignore the task's NUMA mempolicy and be spread
    353: is turned off, then the currently specified NUMA mempolicy once again
    631: mempolicy MPOL_BIND, and the nodes to which it was bound overlap with
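The cpuset memory_spread flags mentioned above are toggled per cpuset through the cgroup v1 interface. A hedged sketch; the mount point /sys/fs/cgroup/cpuset and the cpuset name "example" are assumptions about the local setup:

  /*
   * Enable page-cache spreading for one cpuset, overriding the task NUMA
   * mempolicy for page cache. The v1 mount point and the cpuset name
   * "example" are assumptions; requires root.
   */
  #include <stdio.h>

  int main(void)
  {
      const char *path =
          "/sys/fs/cgroup/cpuset/example/cpuset.memory_spread_page";
      FILE *f = fopen(path, "w");

      if (!f) {
          perror(path);
          return 1;
      }
      fprintf(f, "1\n");    /* 1 enables spreading, 0 restores mempolicy use */
      fclose(f);
      return 0;
  }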