/* xref: /linux/kernel/cgroup/cgroup-internal.h (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa) */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/refcount.h>

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
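
/*
 * Illustrative sketch, not part of the original header: with css_set_lock
 * held, the link structure above can be walked from either side.  The
 * helper name below is made up for the example; cgroup.c open-codes
 * similar loops.
 *
 *	static void example_walk_csets(struct cgroup *cgrp)
 *	{
 *		struct cgrp_cset_link *link;
 *
 *		lockdep_assert_held(&css_set_lock);
 *
 *		// every css_set associated with @cgrp
 *		list_for_each_entry(link, &cgrp->cset_links, cset_link) {
 *			struct css_set *cset = link->cset;
 *
 *			// ... inspect @cset ...
 *		}
 *	}
 */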

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current position during
	 * iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
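
/*
 * Illustrative sketch, not part of the original header: controllers
 * normally consume a cgroup_taskset through the cgroup_taskset_for_each()
 * iterator from linux/cgroup.h, e.g. in an ->attach() callback.  The
 * callback name below is made up for the example.
 *
 *	static void example_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset) {
 *			// @task is being migrated into the cgroup that
 *			// @css belongs to
 *		}
 *	}
 */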

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets.  Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)						\
{										\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),		\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),		\
	.csets			= &tset.src_csets,				\
}

#define CGROUP_MGCTX_INIT(name)							\
{										\
	LIST_HEAD_INIT(name.preloaded_src_csets),				\
	LIST_HEAD_INIT(name.preloaded_dst_csets),				\
	CGROUP_TASKSET_INIT(name.tset),						\
}

#define DEFINE_CGROUP_MGCTX(name)						\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
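
/*
 * Illustrative sketch, not part of the original header: the migration
 * helpers declared further down are used roughly in this order (error
 * handling and the cgroup_mutex/css_set_lock locking rules are omitted
 * here).
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *	int ret;
 *
 *	// preload the source csets under css_set_lock
 *	cgroup_migrate_add_src(src_cset, dst_cgrp, &mgctx);
 *
 *	// look up / allocate destination csets; succeeds or fails as a whole
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *
 *	cgroup_migrate_finish(&mgctx);	// always undoes the preloading
 */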

struct cgroup_sb_opts {
	u16 subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
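
/*
 * Illustrative sketch, not part of the original header: typical use of the
 * iterator above.
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid) {
 *		// @ss is cgroup_subsys[ssid]
 *	}
 */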

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it.  Similar to atomic_dec_and_lock(), but irq-safe
	 * (css_set_lock is taken with interrupts disabled).
	 */
	if (refcount_dec_not_one(&cset->refcount))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	refcount_inc(&cset->refcount);
}
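
/*
 * Illustrative sketch, not part of the original header: a common pattern
 * is to pin a css_set while css_set_lock is held and drop the reference
 * later; put_css_set() takes the lock itself if the count may hit zero.
 *
 *	struct css_set *cset;
 *
 *	spin_lock_irq(&css_set_lock);
 *	cset = task_css_set(current);	// from linux/cgroup.h
 *	get_css_set(cset);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	// ... use @cset ...
 *
 *	put_css_set(cset);
 */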

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask, int ref_flags);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
			       struct cgroup_root *root, unsigned long magic,
			       struct cgroup_namespace *ns);

bool cgroup_may_migrate_to(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off, bool threadgroup);
ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
			   loff_t off);

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern const struct file_operations proc_cgroupstats_operations;
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;

bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns);

#endif /* __CGROUP_INTERNAL_H */