/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef __CPUSET_INTERNAL_H
#define __CPUSET_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/spinlock.h>
#include <linux/union_find.h>
#include <linux/sched/isolation.h>

/* See the "Frequency meter" comments in cpuset-v1.c. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};
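
/*
 * A rough usage sketch only; the frequency meter implementation lives in
 * cpuset-v1.c, and fmeter_markevent()/fmeter_getrate() are assumed (not
 * guaranteed) to still be its entry points there:
 *
 *	fmeter_markevent(&cs->fmeter);		record one memory_pressure event
 *	rate = fmeter_getrate(&cs->fmeter);	recent event rate, decayed over time
 */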

/*
 * Invalid partition error code
 */
enum prs_errcode {
	PERR_NONE = 0,
	PERR_INVCPUS,
	PERR_INVPARENT,
	PERR_NOTPART,
	PERR_NOTEXCL,
	PERR_NOCPUS,
	PERR_HOTPLUG,
	PERR_CPUSEMPTY,
	PERR_HKEEPING,
	PERR_ACCESS,
	PERR_REMOTE,
};
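
/*
 * A note on usage (an assumption drawn from the field names, not a contract):
 * the active code is recorded in the prs_err field of struct cpuset below and
 * is what surfaces as the human-readable reason reported through
 * cpuset.cpus.partition when a partition root is invalid.
 */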

/* bits in struct cpuset flags field */
typedef enum {
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* The various types of files and directories in a cpuset file system */

typedef enum {
	FILE_MEMORY_MIGRATE,
	FILE_CPULIST,
	FILE_MEMLIST,
	FILE_EFFECTIVE_CPULIST,
	FILE_EFFECTIVE_MEMLIST,
	FILE_SUBPARTS_CPULIST,
	FILE_EXCLUSIVE_CPULIST,
	FILE_EFFECTIVE_XCPULIST,
	FILE_ISOLATED_CPULIST,
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
	FILE_MEM_HARDWALL,
	FILE_SCHED_LOAD_BALANCE,
	FILE_PARTITION_ROOT,
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
} cpuset_filetype_t;

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On the default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and are not limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may change when the configured masks change
	 * or when hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On the legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective
	 * masks.
	 */
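
	/*
	 * An illustrative sketch only (the real update paths live in
	 * cpuset.c and also handle partitions and hotplug); on the default
	 * hierarchy the rule above corresponds roughly to:
	 *
	 *	cpumask_and(cs->effective_cpus, cs->cpus_allowed,
	 *		    parent_cs(cs)->effective_cpus);
	 *	if (cpumask_empty(cs->effective_cpus))
	 *		cpumask_copy(cs->effective_cpus,
	 *			     parent_cs(cs)->effective_cpus);
	 */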

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * Exclusive CPUs dedicated to the current cgroup (default hierarchy only)
	 *
	 * The effective_cpus of a valid partition root comes solely from its
	 * effective_xcpus, and some of the effective_xcpus may be distributed
	 * to sub-partitions below and hence excluded from its effective_cpus.
	 * For a valid partition root, its effective_cpus has no relationship
	 * with cpus_allowed unless its exclusive_cpus is not set.
	 *
	 * This value is only set if exclusive_cpus is set or when this cpuset
	 * becomes a local partition root.
	 */
	cpumask_var_t effective_xcpus;

	/*
	 * Exclusive CPUs as requested by the user (default hierarchy only)
	 *
	 * Its value is independent of cpus_allowed and designates the set of
	 * CPUs that can be granted to the current cpuset or its children when
	 * it becomes a valid partition root. The effective set of exclusive
	 * CPUs granted (effective_xcpus) depends on whether those exclusive
	 * CPUs are passed down by its ancestors and not yet taken up by
	 * another sibling partition root along the way.
	 *
	 * If its value isn't set, it defaults to cpus_allowed.
	 */
	cpumask_var_t exclusive_cpus;

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and update the tasks' nodemasks; afterwards
	 *   old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition root state */
	int partition_root_state;

	/*
	 * Whether this cpuset is a remote partition.
	 * This used to be a list anchoring all remote partitions; we can
	 * switch back to a list if we ever need to iterate over them.
	 */
	bool remote_partition;

	/*
	 * number of SCHED_DEADLINE tasks attached to this cpuset, so that we
	 * know when to rebuild associated root domain bandwidth information.
	 */
	int nr_deadline_tasks;
	int nr_migrate_dl_tasks;
	u64 sum_migrate_dl_bw;

	/* Invalid partition error code, not lock protected */
	enum prs_errcode prs_err;

	/* Handle for cpuset.cpus.partition */
	struct cgroup_file partition_file;

#ifdef CONFIG_CPUSETS_V1
	struct fmeter fmeter;		/* memory_pressure filter */

	/* for custom sched domain */
	int relax_domain_level;

	/* Used to merge intersecting subsets for generate_sched_domains */
	struct uf_node node;
#endif
};

extern struct cpuset top_cpuset;

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}
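
/*
 * Example (a sketch, not an actual caller): walking from a task's cpuset
 * up to the root.  The css pointers are only stable under rcu_read_lock()
 * or the relevant cpuset/cgroup locks, so a real caller needs one of them:
 *
 *	struct cpuset *cs;
 *
 *	rcu_read_lock();
 *	for (cs = task_cs(tsk); cs != &top_cpuset; cs = parent_cs(cs))
 *		inspect(cs);			inspect() is hypothetical
 *	rcu_read_unlock();
 */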

/* convenient tests for these bits */
static inline bool is_cpuset_online(struct cpuset *cs)
{
	return css_is_online(&cs->css) && !css_is_dying(&cs->css);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

/*
 * Helper routine for generate_sched_domains().
 * Do cpusets a, b have overlapping effective_cpus masks?
 */
static inline int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key.key) + 1;
}

static inline bool cpuset_is_populated(struct cpuset *cs)
{
	lockdep_assert_cpuset_lock_held();

	/* Cpusets in the process of attaching should be considered populated */
	return cgroup_is_populated(cs->css.cgroup) ||
		cs->attach_in_progress;
}

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
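
/*
 * A minimal usage sketch (hypothetical caller, following the RCU rule
 * stated above):
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct cpuset *child;
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos_css, parent)
 *		visit(child);			visit() is hypothetical
 *	rcu_read_unlock();
 */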

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip a subtree.  @root_cs is included in
 * the iteration and is the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
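
/*
 * A minimal usage sketch (hypothetical caller): @root_cs itself is visited
 * first, and a whole subtree can be skipped by advancing the cursor:
 *
 *	rcu_read_lock();
 *	cpuset_for_each_descendant_pre(cs, pos_css, root) {
 *		if (skip_subtree(cs)) {		skip_subtree() is hypothetical
 *			pos_css = css_rightmost_descendant(pos_css);
 *			continue;
 *		}
 *		visit(cs);			visit() is hypothetical
 *	}
 *	rcu_read_unlock();
 */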

void rebuild_sched_domains_locked(void);
void cpuset_callback_lock_irq(void);
void cpuset_callback_unlock_irq(void);
void cpuset_update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus);
void cpuset_update_tasks_nodemask(struct cpuset *cs);
int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, int turning_on);
ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off);
int cpuset_common_seq_show(struct seq_file *sf, void *v);
void cpuset_full_lock(void);
void cpuset_full_unlock(void);
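
/*
 * The update helpers above generally expect the cpuset locks to be held.
 * A sketch of the expected call shape, inferred from the naming rather
 * than from the actual call sites:
 *
 *	cpuset_full_lock();
 *	modify the cpuset, then call rebuild_sched_domains_locked()
 *	if the change affects sched domains
 *	cpuset_full_unlock();
 */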

/*
 * cpuset-v1.c
 */
#ifdef CONFIG_CPUSETS_V1
extern struct cftype cpuset1_files[];
void cpuset1_update_task_spread_flags(struct cpuset *cs,
					struct task_struct *tsk);
void cpuset1_update_tasks_flags(struct cpuset *cs);
void cpuset1_hotplug_update_tasks(struct cpuset *cs,
			    struct cpumask *new_cpus, nodemask_t *new_mems,
			    bool cpus_updated, bool mems_updated);
int cpuset1_validate_change(struct cpuset *cur, struct cpuset *trial);
bool cpuset1_cpus_excl_conflict(struct cpuset *cs1, struct cpuset *cs2);
void cpuset1_init(struct cpuset *cs);
void cpuset1_online_css(struct cgroup_subsys_state *css);
int cpuset1_generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes);

#else
static inline void cpuset1_update_task_spread_flags(struct cpuset *cs,
					struct task_struct *tsk) {}
static inline void cpuset1_update_tasks_flags(struct cpuset *cs) {}
static inline void cpuset1_hotplug_update_tasks(struct cpuset *cs,
			    struct cpumask *new_cpus, nodemask_t *new_mems,
			    bool cpus_updated, bool mems_updated) {}
static inline int cpuset1_validate_change(struct cpuset *cur,
				struct cpuset *trial) { return 0; }
static inline bool cpuset1_cpus_excl_conflict(struct cpuset *cs1,
					struct cpuset *cs2) { return false; }
static inline void cpuset1_init(struct cpuset *cs) {}
static inline void cpuset1_online_css(struct cgroup_subsys_state *css) {}
static inline int cpuset1_generate_sched_domains(cpumask_var_t **domains,
			struct sched_domain_attr **attributes) { return 0; }

#endif /* CONFIG_CPUSETS_V1 */

#endif /* __CPUSET_INTERNAL_H */