xref: /linux/net/sched/cls_cgroup.c (revision 95e9fd10f06cb5642028b6b851e32b8c8afb4571)
/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/cgroup.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>

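/*
 * Helpers to map a cgroup or a task to its net_cls state: both resolve the
 * css registered under net_cls_subsys_id and convert it to the embedding
 * cgroup_cls_state with container_of().
 */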
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
	return container_of(task_subsys_state(p, net_cls_subsys_id),
			    struct cgroup_cls_state, css);
}

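/*
 * Allocate per-cgroup state.  A newly created cgroup starts out with its
 * parent's classid, so child groups inherit the classification until
 * net_cls.classid is written explicitly.
 */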
static struct cgroup_subsys_state *cgrp_create(struct cgroup *cgrp)
{
	struct cgroup_cls_state *cs;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);

	if (cgrp->parent)
		cs->classid = cgrp_cls_state(cgrp->parent)->classid;

	return &cs->css;
}

static void cgrp_destroy(struct cgroup *cgrp)
{
	kfree(cgrp_cls_state(cgrp));
}

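/*
 * Accessors for the net_cls.classid control file.  The classid is a 32-bit
 * value conventionally written as 0xAAAABBBB, where AAAA is the major and
 * BBBB the minor handle of the tc class to select.  An illustrative setup
 * from userspace might look like:
 *
 *	mkdir /sys/fs/cgroup/net_cls/foo
 *	echo 0x00100001 > /sys/fs/cgroup/net_cls/foo/net_cls.classid  # 10:1
 *	tc filter add dev eth0 parent 10: protocol ip prio 10 handle 1: cgroup
 *
 * Paths, device names and handles above are examples only; they depend on
 * the local cgroup mount point and qdisc configuration.
 */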
static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
{
	return cgrp_cls_state(cgrp)->classid;
}

static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
{
	cgrp_cls_state(cgrp)->classid = (u32) value;
	return 0;
}

static struct cftype ss_files[] = {
	{
		.name = "classid",
		.read_u64 = read_classid,
		.write_u64 = write_classid,
	},
	{ }	/* terminate */
};

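/*
 * The net_cls cgroup subsystem.  When the classifier is built into the
 * kernel (CONFIG_NET_CLS_CGROUP) the subsystem id is fixed at compile time;
 * when built as a module the id is assigned by cgroup_load_subsys() and
 * published from init_cgroup_cls() below.
 */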
struct cgroup_subsys net_cls_subsys = {
	.name		= "net_cls",
	.create		= cgrp_create,
	.destroy	= cgrp_destroy,
#ifdef CONFIG_NET_CLS_CGROUP
	.subsys_id	= net_cls_subsys_id,
#endif
	.base_cftypes	= ss_files,
	.module		= THIS_MODULE,
};

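/*
 * Per-tcf_proto filter state.  The cgroup classifier keeps exactly one
 * element per filter instance, identified by 'handle'.
 */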
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
};

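/*
 * Classify a packet by the classid of the cgroup the sending task belongs
 * to.  In process context the classid is taken from 'current'; in softirq
 * context, where 'current' is unrelated to the packet, the classid cached
 * in the originating socket is used instead.
 */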
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = tp->root;
	u32 classid;

	rcu_read_lock();
	classid = task_cls_state(current)->classid;
	rcu_read_unlock();

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}

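/*
 * The single element per filter instance is created implicitly by change()
 * and is not individually addressable, so get/put are no-ops and init has
 * nothing to set up.
 */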
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}

static const struct tcf_ext_map cgroup_ext_map = {
	.action = TCA_CGROUP_ACT,
	.police = TCA_CGROUP_POLICE,
};

static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};

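/*
 * Create or update the (single) cgroup filter element.  The first change
 * request allocates the head and records its handle; subsequent requests
 * must use the same handle.  Extended matches and actions are validated
 * before the existing ones are replaced.
 */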
static int cls_cgroup_change(struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = tp->root;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head == NULL) {
		if (!handle)
			return -EINVAL;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;

		head->handle = handle;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	if (handle != head->handle)
		return -ENOENT;

	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		return err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &cgroup_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		/* Release actions taken by tcf_exts_validate() above. */
		tcf_exts_destroy(tp, &e);
		return err;
	}

	tcf_exts_change(tp, &head->exts, &e);
	tcf_em_tree_change(tp, &head->ematches, &t);

	return 0;
}

static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = tp->root;

	if (head) {
		tcf_exts_destroy(tp, &head->exts);
		tcf_em_tree_destroy(tp, &head->ematches);
		kfree(head);
	}
}

static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}

static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = tp->root;

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}

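/*
 * Dump the filter's handle, extended matches, actions and statistics into a
 * netlink message.  On overflow the partially written attributes are
 * trimmed and -1 is returned.
 */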
static int cls_cgroup_dump(struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = tp->root;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts, &cgroup_ext_map) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts, &cgroup_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};

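/*
 * Module init/exit.  The cgroup subsystem must be loaded before the tc
 * filter kind is registered so that net_cls_subsys_id is valid by the time
 * the classifier can be attached.  When built as a module, the id is
 * published here and invalidated on exit under an RCU grace period.
 */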
static int __init init_cgroup_cls(void)
{
	int ret;

	ret = cgroup_load_subsys(&net_cls_subsys);
	if (ret)
		goto out;

#ifndef CONFIG_NET_CLS_CGROUP
	/* We can't use rcu_assign_pointer because this is an int. */
	smp_wmb();
	net_cls_subsys_id = net_cls_subsys.subsys_id;
#endif

	ret = register_tcf_proto_ops(&cls_cgroup_ops);
	if (ret)
		cgroup_unload_subsys(&net_cls_subsys);

out:
	return ret;
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);

#ifndef CONFIG_NET_CLS_CGROUP
	net_cls_subsys_id = -1;
	synchronize_rcu();
#endif

	cgroup_unload_subsys(&net_cls_subsys);
}

module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");