xref: /linux/net/sched/cls_fw.c (revision 65782b2db7321d5f97c16718c4c7f6c7205a56be)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
4  *
5  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6  *
7  * Changes:
8  * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
9  * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
10  * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
11  */
12 
13 #include <linux/module.h>
14 #include <linux/slab.h>
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/string.h>
18 #include <linux/errno.h>
19 #include <linux/skbuff.h>
20 #include <net/netlink.h>
21 #include <net/act_api.h>
22 #include <net/pkt_cls.h>
23 #include <net/sch_generic.h>
24 #include <net/tc_wrapper.h>
25 
#define HTSIZE 256

/* Per-tcf_proto root: a HTSIZE-bucket hash table of fw_filter chains,
 * keyed by the (masked) skb mark.  Lookup side runs under RCU-bh
 * (fw_classify()); the head is freed via kfree_rcu(), hence @rcu.
 */
struct fw_head {
	u32			mask;	/* ANDed with skb->mark before lookup;
					 * fixed at table creation (fw_change())
					 */
	struct fw_filter __rcu	*ht[HTSIZE];
	struct rcu_head		rcu;
};
33 
/* One installed filter, linked into an RCU-protected hash-bucket chain. */
struct fw_filter {
	struct fw_filter __rcu	*next;	/* next filter in the same bucket */
	u32			id;	/* mark value this filter matches (== handle) */
	struct tcf_result	res;	/* classification result on match */
	int			ifindex;	/* indev constraint, checked via
						 * tcf_match_indev()
						 */
	struct tcf_exts		exts;	/* attached actions / policer */
	struct tcf_proto	*tp;
	struct rcu_work		rwork;	/* deferred destruction after an RCU
					 * grace period (fw_delete_filter_work)
					 */
};
43 
44 static u32 fw_hash(u32 handle)
45 {
46 	handle ^= (handle >> 16);
47 	handle ^= (handle >> 8);
48 	return handle % HTSIZE;
49 }
50 
/* Classify @skb by its firewall mark (skb->mark).
 *
 * New method (hash table allocated): mask the mark with head->mask,
 * look it up in the table, and on a hit run the filter's extensions.
 *
 * Old method (no fw_head): treat the raw mark itself as a classid of
 * the qdisc this classifier is attached to.  Refused on a shared block,
 * where no single owning qdisc exists.
 *
 * Returns the tcf_exts_exec() result on a table hit, 0 on an old-method
 * hit, or -1 when nothing matched.
 */
TC_INDIRECT_SCOPE int fw_classify(struct sk_buff *skb,
				  const struct tcf_proto *tp,
				  struct tcf_result *res)
{
	struct fw_head *head = rcu_dereference_bh(tp->root);
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;

		for (f = rcu_dereference_bh(head->ht[fw_hash(id)]); f;
		     f = rcu_dereference_bh(f->next)) {
			if (f->id == id) {
				*res = f->res;
				/* On indev mismatch or a negative action
				 * verdict, keep scanning the chain for
				 * another filter with the same id.
				 */
				if (!tcf_match_indev(skb, f->ifindex))
					continue;
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		struct Qdisc *q;

		/* Old method: classify the packet using its skb mark. */
		if (tcf_block_shared(tp->chain->block))
			return -1;

		q = tcf_block_q(tp->chain->block);
		/* Accept the mark as a classid when it has no major part
		 * or its major part equals this qdisc's handle.
		 */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}
94 
95 static void *fw_get(struct tcf_proto *tp, u32 handle)
96 {
97 	struct fw_head *head = rtnl_dereference(tp->root);
98 	struct fw_filter *f;
99 
100 	if (head == NULL)
101 		return NULL;
102 
103 	f = rtnl_dereference(head->ht[fw_hash(handle)]);
104 	for (; f; f = rtnl_dereference(f->next)) {
105 		if (f->id == handle)
106 			return f;
107 	}
108 	return NULL;
109 }
110 
/* tp->root stays NULL until the first filter is installed by
 * fw_change(); a NULL root is what selects the old mark-as-classid
 * path in fw_classify().
 */
static int fw_init(struct tcf_proto *tp)
{
	/* We don't allocate fw_head here, because in the old method
	 * we don't need it at all.
	 */
	return 0;
}
118 
/* Final teardown of a filter: release its extensions and their netns
 * reference, then free the memory.  Caller must guarantee no RCU
 * reader can still see @f.
 */
static void __fw_delete_filter(struct fw_filter *f)
{
	tcf_exts_destroy(&f->exts);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}
125 
/* Deferred destruction worker, queued via tcf_queue_work() so it runs
 * only after an RCU grace period.  Takes RTNL around the actual
 * teardown.
 */
static void fw_delete_filter_work(struct work_struct *work)
{
	struct fw_filter *f = container_of(to_rcu_work(work),
					   struct fw_filter,
					   rwork);
	rtnl_lock();
	__fw_delete_filter(f);
	rtnl_unlock();
}
135 
/* Tear down the whole filter table.  Called with RTNL held.
 *
 * Filters whose extensions still hold a netns reference are freed via
 * the deferred RCU work path; otherwise they can be freed immediately.
 * The head itself goes through kfree_rcu() so concurrent fw_classify()
 * readers remain safe.
 */
static void fw_destroy(struct tcf_proto *tp, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = rtnl_dereference(head->ht[h])) != NULL) {
			RCU_INIT_POINTER(head->ht[h],
					 rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			if (tcf_exts_get_net(&f->exts))
				tcf_queue_work(&f->rwork, fw_delete_filter_work);
			else
				__fw_delete_filter(f);
		}
	}
	kfree_rcu(head, rcu);
}
159 
/* Unlink filter @arg from its hash chain and queue it for deferred
 * destruction.  *last is set to true iff the table is empty on return,
 * which lets the caller destroy the whole tcf_proto.
 *
 * Returns 0 on success, -EINVAL when there is no table or @arg was not
 * found in its chain.
 */
static int fw_delete(struct tcf_proto *tp, void *arg, bool *last,
		     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = arg;
	struct fw_filter __rcu **fp;
	struct fw_filter *pfp;
	int ret = -EINVAL;
	int h;

	if (head == NULL || f == NULL)
		goto out;

	fp = &head->ht[fw_hash(f->id)];

	/* Walk via the address of each link pointer so the matching
	 * ->next (or bucket head) can be rewritten in place.
	 */
	for (pfp = rtnl_dereference(*fp); pfp;
	     fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
		if (pfp == f) {
			RCU_INIT_POINTER(*fp, rtnl_dereference(f->next));
			tcf_unbind_filter(tp, &f->res);
			tcf_exts_get_net(&f->exts);
			tcf_queue_work(&f->rwork, fw_delete_filter_work);
			ret = 0;
			break;
		}
	}

	/* Report emptiness regardless of whether the delete succeeded. */
	*last = true;
	for (h = 0; h < HTSIZE; h++) {
		if (rcu_access_pointer(head->ht[h])) {
			*last = false;
			break;
		}
	}

out:
	return ret;
}
198 
199 static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
200 	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
201 	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
202 	[TCA_FW_MASK]		= { .type = NLA_U32 },
203 };
204 
/* Parse and apply the netlink attributes in @tb to filter @f.
 *
 * TCA_FW_MASK, when present, may only restate the mask the table was
 * created with (head->mask is fixed in fw_change()); when absent, the
 * table must be using the default all-ones mask.  The class bind for
 * TCA_FW_CLASSID is done last, so the error paths above it never need
 * a matching unbind.
 */
static int fw_set_parms(struct net *net, struct tcf_proto *tp,
			struct fw_filter *f, struct nlattr **tb,
			struct nlattr **tca, unsigned long base, u32 flags,
			struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	u32 mask;
	int err;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &f->exts, flags,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_FW_INDEV]) {
		int ret;
		ret = tcf_change_indev(net, tb[TCA_FW_INDEV], extack);
		if (ret < 0)
			return ret;
		f->ifindex = ret;
	}

	err = -EINVAL;
	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			return err;
	} else if (head->mask != 0xFFFFFFFF)
		return err;

	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	return 0;
}
242 
/* Create a new filter, or replace the existing one passed via *arg.
 *
 * Replacement is done RCU-style: a fresh fw_filter is allocated and
 * populated, then swapped into the hash chain in place of the old one,
 * which is queued for deferred destruction.  The hash table (fw_head)
 * itself is allocated lazily on the first insert; its mask comes from
 * TCA_FW_MASK (default all ones) and is fixed from then on.
 *
 * A NULL TCA_OPTIONS selects the old mark-as-classid method, which is
 * only valid without a handle and on a non-shared block.
 */
static int fw_change(struct net *net, struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle, struct nlattr **tca, void **arg,
		     u32 flags, struct netlink_ext_ack *extack)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt) {
		if (handle)
			return -EINVAL;

		if (tcf_block_shared(tp->chain->block)) {
			NL_SET_ERR_MSG(extack,
				       "Must specify mark when attaching fw filter to block");
			return -EINVAL;
		}

		return 0; /* Succeed if it is old method. */
	}

	err = nla_parse_nested_deprecated(tb, TCA_FW_MAX, opt, fw_policy,
					  NULL);
	if (err < 0)
		return err;

	if (f) {
		/* Replace path: build a new filter carrying over the old
		 * id, indev and tp.  The old tcf_result is NOT copied;
		 * the new filter's classid comes solely from
		 * TCA_FW_CLASSID via fw_set_parms().
		 */
		struct fw_filter *pfp, *fnew;
		struct fw_filter __rcu **fp;

		if (f->id != handle && handle)
			return -EINVAL;

		fnew = kzalloc_obj(struct fw_filter);
		if (!fnew)
			return -ENOBUFS;

		fnew->id = f->id;
		fnew->ifindex = f->ifindex;
		fnew->tp = f->tp;

		err = tcf_exts_init(&fnew->exts, net, TCA_FW_ACT,
				    TCA_FW_POLICE);
		if (err < 0) {
			kfree(fnew);
			return err;
		}

		err = fw_set_parms(net, tp, fnew, tb, tca, base, flags, extack);
		if (err < 0) {
			tcf_exts_destroy(&fnew->exts);
			kfree(fnew);
			return err;
		}

		/* Find the link pointer that currently references @f,
		 * then atomically publish the replacement there.
		 */
		fp = &head->ht[fw_hash(fnew->id)];
		for (pfp = rtnl_dereference(*fp); pfp;
		     fp = &pfp->next, pfp = rtnl_dereference(*fp))
			if (pfp == f)
				break;

		RCU_INIT_POINTER(fnew->next, rtnl_dereference(pfp->next));
		rcu_assign_pointer(*fp, fnew);
		tcf_unbind_filter(tp, &f->res);
		tcf_exts_get_net(&f->exts);
		tcf_queue_work(&f->rwork, fw_delete_filter_work);

		*arg = fnew;
		return err;
	}

	if (!handle)
		return -EINVAL;

	if (!head) {
		/* First filter: allocate the hash table and lock in the
		 * mask for the lifetime of this tcf_proto.
		 */
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc_obj(*head);
		if (!head)
			return -ENOBUFS;
		head->mask = mask;

		rcu_assign_pointer(tp->root, head);
	}

	f = kzalloc_obj(struct fw_filter);
	if (f == NULL)
		return -ENOBUFS;

	err = tcf_exts_init(&f->exts, net, TCA_FW_ACT, TCA_FW_POLICE);
	if (err < 0)
		goto errout;
	f->id = handle;
	f->tp = tp;

	err = fw_set_parms(net, tp, f, tb, tca, base, flags, extack);
	if (err < 0)
		goto errout;

	/* Publish the new filter at the head of its bucket. */
	RCU_INIT_POINTER(f->next, head->ht[fw_hash(handle)]);
	rcu_assign_pointer(head->ht[fw_hash(handle)], f);

	*arg = f;
	return 0;

errout:
	tcf_exts_destroy(&f->exts);
	kfree(f);
	return err;
}
358 
/* Iterate every installed filter for a dump walk, honoring arg->stop
 * (set here when no table exists, or by tc_cls_stats_dump() asking to
 * stop early).
 */
static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		    bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = rtnl_dereference(head->ht[h]); f;
		     f = rtnl_dereference(f->next)) {
			if (!tc_cls_stats_dump(tp, arg, f))
				return;
		}
	}
}
381 
/* Dump one filter (@fh) to netlink.  A filter with neither a classid
 * nor actions is dumped as handle only, with no TCA_OPTIONS nest.
 * Returns skb->len on success, -1 when the skb runs out of attribute
 * space.
 */
static int fw_dump(struct net *net, struct tcf_proto *tp, void *fh,
		   struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct fw_head *head = rtnl_dereference(tp->root);
	struct fw_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	if (!f->res.classid && !tcf_exts_has_actions(&f->exts))
		return skb->len;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
	if (f->ifindex) {
		struct net_device *dev;
		dev = __dev_get_by_index(net, f->ifindex);
		/* Silently omit INDEV if the device has since gone away. */
		if (dev && nla_put_string(skb, TCA_FW_INDEV, dev->name))
			goto nla_put_failure;
	}
	/* Only a non-default mask is worth reporting. */
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
428 
/* Re-point this filter's result at a new class binding; thin wrapper
 * around tc_cls_bind_class().
 */
static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
			  unsigned long base)
{
	struct fw_filter *f = fh;

	tc_cls_bind_class(classid, cl, q, &f->res, base);
}
436 
/* Classifier ops table registered with the TC core under kind "fw". */
static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		=	"fw",
	.classify	=	fw_classify,
	.init		=	fw_init,
	.destroy	=	fw_destroy,
	.get		=	fw_get,
	.change		=	fw_change,
	.delete		=	fw_delete,
	.walk		=	fw_walk,
	.dump		=	fw_dump,
	.bind_class	=	fw_bind_class,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_CLS("fw");
451 
/* Module init: register the "fw" classifier with the TC core. */
static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

/* Module exit: unregister the classifier. */
static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_DESCRIPTION("SKB mark based TC classifier");
MODULE_LICENSE("GPL");
466