/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/jump_label.h>

extern bool lockdep_is_cpuset_held(void);

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths that loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we
 * need to ensure that begin() is always rewritten before retry() in the
 * disabled -> enabled transition. Otherwise, if local irqs are disabled
 * around the loop, we can deadlock: retry() would keep comparing the
 * latest value of the mems_allowed seqcount against 0, because begin()
 * would still see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in the reverse order for the same reason (we
 * want retry() to stop looking at the real value of mems_allowed.sequence
 * first).
 */
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
extern struct static_key_false cpusets_insane_config_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc_cpuslocked(&cpusets_pre_enable_key);
	static_branch_inc_cpuslocked(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec_cpuslocked(&cpusets_enabled_key);
	static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
}

/*
 * This gets enabled whenever a cpuset configuration is considered
 * unsupportable in general, e.g. a movable-only node which cannot satisfy
 * any non-movable allocations (see update_nodemask). The page allocator
 * needs to make additional checks for such configurations, and this
 * check is meant to guard those checks without adding any overhead for
 * sane configurations.
 */
static inline bool cpusets_insane_config(void)
{
	return static_branch_unlikely(&cpusets_insane_config_key);
}
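
/*
 * Illustrative sketch only (not part of this header): the intent is that a
 * hot path tests the static key first and only then performs the extra
 * validation, so sane configurations pay nothing. The helper
 * node_has_managed_memory() below is hypothetical; the real page allocator
 * checks differ.
 *
 *	if (cpusets_insane_config() &&
 *	    !node_has_managed_memory(first_node(*nodemask)))
 *		pr_warn_once("cpuset: nodemask has no usable memory\n");
 */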

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void inc_dl_tasks_cs(struct task_struct *task);
extern void dec_dl_tasks_cs(struct task_struct *task);
extern void cpuset_lock(void);
extern void cpuset_unlock(void);
extern void lockdep_assert_cpuset_lock_held(void);
extern void cpuset_cpus_allowed_locked(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern bool cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool cpuset_current_node_allowed(int node, gfp_t gfp_mask);

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_current_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#ifdef CONFIG_CPUSETS_V1
#define cpuset_memory_pressure_bump()				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);
#else
static inline void cpuset_memory_pressure_bump(void) { }
#endif

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void dl_rebuild_rd_accounting(void);
extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
extern void cpuset_reset_sched_domains(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
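
/*
 * Illustrative sketch only (not part of this header): a typical retry loop
 * pairing the two helpers above, in the style of page cache allocation
 * callers. __alloc_pages_node() comes from <linux/gfp.h>; the gfp mask and
 * order used here are arbitrary.
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *	int nid;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		nid = cpuset_mem_spread_node();
 *		page = __alloc_pages_node(nid, GFP_KERNEL, 0);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */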

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

extern void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask);
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline bool cpusets_insane_config(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void inc_dl_tasks_cs(struct task_struct *task) { }
static inline void dec_dl_tasks_cs(struct task_struct *task) { }
static inline void cpuset_lock(void) { }
static inline void cpuset_unlock(void) { }
static inline void lockdep_assert_cpuset_lock_held(void) { }

static inline void cpuset_cpus_allowed_locked(struct task_struct *p,
					struct cpumask *mask)
{
	cpumask_copy(mask, task_cpu_possible_mask(p));
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpuset_cpus_allowed_locked(p, mask);
}

static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	return false;
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void dl_rebuild_rd_accounting(void)
{
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_reset_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

static inline void cpuset_nodes_allowed(struct cgroup *cgroup, nodemask_t *mask)
{
	nodes_copy(*mask, node_states[N_MEMORY]);
}
#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */