/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	nodemask_t nodes;	/* interleave/bind/prefer */
	int home_node;		/* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */

	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return true;
	return __mpol_equal(a, b);
}
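
/*
 * Example: the common reference-count lifecycle (an illustrative sketch,
 * not an API defined by this header; "parent_pol" is a hypothetical
 * local). It assumes __mpol_dup() follows mempolicy.c's convention of
 * returning an ERR_PTR() on allocation failure:
 *
 *	struct mempolicy *pol = mpol_dup(parent_pol);
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);	// duplication failed, no object
 *	mpol_get(pol);			// take an extra reference: 1 -> 2
 *	...
 *	mpol_put(pol);			// drop the extra reference: 2 -> 1
 *	mpol_put(pol);			// last ref: __mpol_put() frees pol
 */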

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);
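
/*
 * Example: shared policy lookup for a mapped file page (an illustrative
 * sketch, not an API defined by this header; "info" and "pgoff" are
 * hypothetical). Because the tree is indexed in pages, callers pass a
 * page offset such as a pagecache pgoff, not a byte address. Assuming
 * the lookup takes a reference on any MPOL_F_SHARED policy it returns,
 * the matching release is mpol_cond_put():
 *
 *	struct mempolicy *pol;
 *
 *	pol = mpol_shared_policy_lookup(&info->policy, pgoff);
 *	if (!pol)
 *		pol = get_task_policy(current);	// no entry: task policy
 *	...allocate according to pol...
 *	mpol_cond_put(pol);	// unrefs only if MPOL_F_SHARED is set
 */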

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
		unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
				const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
		     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return (pol->mode == MPOL_PREFERRED_MANY);
}

extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
	return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
				const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
				   const nodemask_t *to, int flags)
{
	return 0;
}

static inline void check_highest_zone(enum zone_type k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
	return 1;	/* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
				 unsigned long address)
{
	return -1; /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
	return false;
}

#endif /* CONFIG_NUMA */
#endif