/* kernel/cgroup/cgroup-internal.h (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9) */
#ifndef __CGROUP_INTERNAL_H
#define __CGROUP_INTERNAL_H

#include <linux/cgroup.h>
#include <linux/kernfs.h>
#include <linux/workqueue.h>
#include <linux/list.h>

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
	/* the cgroup and css_set this link associates */
	struct cgroup		*cgrp;
	struct css_set		*cset;

	/* list of cgrp_cset_links anchored at cgrp->cset_links */
	struct list_head	cset_link;

	/* list of cgrp_cset_links anchored at css_set->cgrp_links */
	struct list_head	cgrp_link;
};
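
/*
 * Illustrative traversal (an editorial sketch, not part of this header):
 * walking every css_set linked to a cgroup through the cset_link lists.
 * css_set_lock must be held; use_cset() is a placeholder.
 *
 *	struct cgrp_cset_link *link;
 *
 *	list_for_each_entry(link, &cgrp->cset_links, cset_link)
 *		use_cset(link->cset);
 */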

/* used to track tasks and csets during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/* the subsys currently being processed */
	int			ssid;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets points to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current position during
	 * iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};
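
/*
 * A taskset is normally consumed with cgroup_taskset_for_each() from
 * linux/cgroup.h.  Illustrative sketch of a subsystem ->attach() callback
 * (handle_task() is a placeholder):
 *
 *	struct task_struct *task;
 *	struct cgroup_subsys_state *css;
 *
 *	cgroup_taskset_for_each(task, css, tset)
 *		handle_task(task, css);
 */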

/* migration context also tracks preloading */
struct cgroup_mgctx {
	/*
	 * Preloaded source and destination csets.  Used to guarantee
	 * atomic success or failure on actual migration.
	 */
	struct list_head	preloaded_src_csets;
	struct list_head	preloaded_dst_csets;

	/* tasks and csets to migrate */
	struct cgroup_taskset	tset;

	/* subsystems affected by migration */
	u16			ss_mask;
};

#define CGROUP_TASKSET_INIT(tset)						\
{										\
	.src_csets		= LIST_HEAD_INIT(tset.src_csets),		\
	.dst_csets		= LIST_HEAD_INIT(tset.dst_csets),		\
	.csets			= &tset.src_csets,				\
}

#define CGROUP_MGCTX_INIT(name)							\
{										\
	LIST_HEAD_INIT(name.preloaded_src_csets),				\
	LIST_HEAD_INIT(name.preloaded_dst_csets),				\
	CGROUP_TASKSET_INIT(name.tset),						\
}

#define DEFINE_CGROUP_MGCTX(name)						\
	struct cgroup_mgctx name = CGROUP_MGCTX_INIT(name)
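
/*
 * DEFINE_CGROUP_MGCTX() declares a fully initialized on-stack migration
 * context, e.g. "DEFINE_CGROUP_MGCTX(mgctx);".  Note that
 * CGROUP_TASKSET_INIT() starts ->tset.csets at ->tset.src_csets; it
 * switches to ->dst_csets once the migration is committed.
 */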

struct cgroup_sb_opts {
	u16 subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};
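
/*
 * Illustrative mapping (a sketch, not authoritative): a v1 mount such as
 *
 *	mount -t cgroup -o cpu,name=mygrp,release_agent=/sbin/agent none /mnt
 *
 * parses into ->subsys_mask with the cpu controller's bit set,
 * ->name = "mygrp" and ->release_agent = "/sbin/agent", while
 * "-o none,name=mygrp" sets ->none to request a named hierarchy with no
 * controllers attached.
 */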

extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
extern struct cgroup_subsys *cgroup_subsys[];
extern struct list_head cgroup_roots;
extern struct file_system_type cgroup_fs_type;

/* iterate across the hierarchies */
#define for_each_root(root)						\
	list_for_each_entry((root), &cgroup_roots, root_list)

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, set to CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)					\
	for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&		\
	     (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)
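
/*
 * Illustrative use (sketch):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	for_each_subsys(ss, ssid)
 *		pr_info("subsys %d: %s\n", ssid, ss->name);
 */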

static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
	return !(cgrp->self.flags & CSS_ONLINE);
}

static inline bool notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

void put_css_set_locked(struct css_set *cset);

static inline void put_css_set(struct css_set *cset)
{
	unsigned long flags;

	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it.  Similar to atomic_dec_and_lock(), but irq-safe
	 * and against css_set_lock.
	 */
	if (atomic_add_unless(&cset->refcount, -1, 1))
		return;

	spin_lock_irqsave(&css_set_lock, flags);
	put_css_set_locked(cset);
	spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
	atomic_inc(&cset->refcount);
}
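
/*
 * Illustrative pairing (sketch; inspect_cset() is a placeholder): hold a
 * reference while a cset is used outside css_set_lock and drop it when
 * done.
 *
 *	get_css_set(cset);
 *	inspect_cset(cset);
 *	put_css_set(cset);
 */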

bool cgroup_ssid_enabled(int ssid);
bool cgroup_on_dfl(const struct cgroup *cgrp);

struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root);
struct cgroup *task_cgroup_from_root(struct task_struct *task,
				     struct cgroup_root *root);
struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn, bool drain_offline);
void cgroup_kn_unlock(struct kernfs_node *kn);
int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
			  struct cgroup_namespace *ns);

void cgroup_free_root(struct cgroup_root *root);
void init_cgroup_root(struct cgroup_root *root, struct cgroup_sb_opts *opts);
int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask);
int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
struct dentry *cgroup_do_mount(struct file_system_type *fs_type, int flags,
			       struct cgroup_root *root, unsigned long magic,
			       struct cgroup_namespace *ns);

bool cgroup_may_migrate_to(struct cgroup *dst_cgrp);
void cgroup_migrate_finish(struct cgroup_mgctx *mgctx);
void cgroup_migrate_add_src(struct css_set *src_cset, struct cgroup *dst_cgrp,
			    struct cgroup_mgctx *mgctx);
int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx);
int cgroup_migrate(struct task_struct *leader, bool threadgroup,
		   struct cgroup_mgctx *mgctx);
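
/*
 * The migration helpers above are used in sequence.  Sketch loosely
 * following cgroup_attach_task() in cgroup.c; error handling and
 * threadgroup iteration are abbreviated:
 *
 *	DEFINE_CGROUP_MGCTX(mgctx);
 *	int ret;
 *
 *	spin_lock_irq(&css_set_lock);
 *	cgroup_migrate_add_src(task_css_set(leader), dst_cgrp, &mgctx);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(&mgctx);
 *	if (!ret)
 *		ret = cgroup_migrate(leader, threadgroup, &mgctx);
 *	cgroup_migrate_finish(&mgctx);
 */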

int cgroup_attach_task(struct cgroup *dst_cgrp, struct task_struct *leader,
		       bool threadgroup);
ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
			     size_t nbytes, loff_t off, bool threadgroup);
ssize_t cgroup_procs_write(struct kernfs_open_file *of, char *buf, size_t nbytes,
			   loff_t off);

void cgroup_lock_and_drain_offline(struct cgroup *cgrp);

int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, umode_t mode);
int cgroup_rmdir(struct kernfs_node *kn);
int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
		     struct kernfs_root *kf_root);

/*
 * namespace.c
 */
extern const struct proc_ns_operations cgroupns_operations;

/*
 * cgroup-v1.c
 */
extern struct cftype cgroup1_base_files[];
extern const struct file_operations proc_cgroupstats_operations;
extern struct kernfs_syscall_ops cgroup1_kf_syscall_ops;

bool cgroup1_ssid_disabled(int ssid);
void cgroup1_pidlist_destroy_all(struct cgroup *cgrp);
void cgroup1_release_agent(struct work_struct *work);
void cgroup1_check_for_release(struct cgroup *cgrp);
struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags,
			     void *data, unsigned long magic,
			     struct cgroup_namespace *ns);

#endif /* __CGROUP_INTERNAL_H */