/* xref: /linux/net/sched/cls_route.c (revision 14b42963f64b98ab61fa9723c03d71aa5ef4f862) */
1 /*
2  * net/sched/cls_route.c	ROUTE4 classifier.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  */
11 
12 #include <linux/module.h>
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/sched.h>
19 #include <linux/string.h>
20 #include <linux/mm.h>
21 #include <linux/socket.h>
22 #include <linux/sockios.h>
23 #include <linux/in.h>
24 #include <linux/errno.h>
25 #include <linux/interrupt.h>
26 #include <linux/if_ether.h>
27 #include <linux/inet.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/notifier.h>
31 #include <net/ip.h>
32 #include <net/route.h>
33 #include <linux/skbuff.h>
34 #include <net/sock.h>
35 #include <net/act_api.h>
36 #include <net/pkt_cls.h>
37 
38 /*
39    1. For now we assume that route tags < 256.
40       It allows to use direct table lookups, instead of hash tables.
41    2. For now we assume that "from TAG" and "fromdev DEV" statements
42       are mutually  exclusive.
43    3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
44  */
45 
/* One slot of the per-head result cache: remembers the last filter
 * (or ROUTE4_FAILURE) that matched a given (route tag, iif) pair so
 * repeated packets can skip the hash-table walk. */
struct route4_fastmap
{
	struct route4_filter	*filter;	/* cached match, or ROUTE4_FAILURE */
	u32			id;		/* dst->tclassid this slot caches */
	int			iif;		/* input interface this slot caches */
};
52 
/* Root of one classifier instance (hangs off tp->root). */
struct route4_head
{
	/* result cache, indexed by route4_fastmap_hash() */
	struct route4_fastmap	fastmap[16];
	/* slots 0..255 keyed by the "to" tag; slot 256 is the "to ANY" bucket */
	struct route4_bucket	*table[256+1];
};
58 
/* Second-level table for one "to" slot: separate chains for FROM-tag
 * matches, incoming-device matches and the wildcard. */
struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};
64 
/* One installed filter, linked into a bucket chain sorted by handle. */
struct route4_filter
{
	struct route4_filter	*next;	/* next filter in the same chain */
	u32			id;	/* "to" tag in low bits, "from" tag << 16 (see route4_set_parms) */
	int			iif;	/* input interface index for "fromdev" filters */

	struct tcf_result	res;	/* classification result (classid, class) */
	struct tcf_exts		exts;	/* attached actions / policer */
	u32			handle;	/* encodes to/from/iif; layout in route4_set_parms() */
	struct route4_bucket	*bkt;	/* back-pointer to the owning bucket */
};
76 
/* Sentinel stored in the fastmap to cache a failed lookup. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

/* Maps the generic extension slots to this classifier's netlink types. */
static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
83 
/* Fastmap slot for (id, iif): low nibble of the route tag only.
 * iif does not contribute to the index here; it is still compared
 * against the cached value on lookup in route4_classify(). */
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}
88 
/* Flush the whole result cache under the device queue lock so this
 * cannot race with the classify path writing new cache entries.
 * id is currently unused: the entire map is cleared regardless. */
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}
96 
/* Cache a lookup result (a real filter or ROUTE4_FAILURE) for the
 * (id, iif) pair in its fastmap slot. */
static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
106 
/* Top-level table index used at classify time: low byte of the tag. */
static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}
111 
/* Chain index (0..15) for a "from TAG" match: low nibble of the tag
 * held in bits 16..31 of the route tag. */
static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}
116 
/* Chain index (16..31) for a "fromdev DEV" match: only bits 16..19 of
 * the interface index participate, offset past the 16 FROM chains. */
static inline int route4_hash_iif(int iif)
{
	int nibble = (iif >> 16) & 0xF;

	return 16 + nibble;
}
121 
/* Index of the single wildcard chain, located after the 16 FROM and
 * the 16 IIF chains. */
static inline int route4_hash_wild(void)
{
	return 16 + 16;
}
126 
/*
 * Expanded inside the chain-walk loops of route4_classify().  Copies
 * the filter's result into *res and, if extensions are attached, runs
 * them: a negative tcf_exts_exec() result makes the surrounding loop
 * "continue" with the next filter and disables fastmap caching for
 * this packet.  On a plain match the result is cached in the fastmap.
 * Relies on skb, res, f, head, id, iif and dont_cache being in scope
 * at the expansion site.
 */
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
141 
/*
 * Main classification entry point: match the packet's route tag
 * (dst->tclassid) and input interface against the installed filters.
 * Returns 0 with *res filled in on a match, -1 otherwise.
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: a recently cached result for this (id, iif). */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* Most specific first: FROM tag, then incoming device,
		 * then the wildcard chain. */
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	/* Second pass over the "to ANY" bucket (slot 256), with the low
	 * 16 tag bits cleared so the f->id comparison still works. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	/* Cache the miss so the next such packet fails fast. */
	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* No table configured: interpret the route tag directly as a
	 * classid, if it has no major part or matches this qdisc. */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
208 
209 static inline u32 to_hash(u32 id)
210 {
211 	u32 h = id&0xFF;
212 	if (id&0x8000)
213 		h += 256;
214 	return h;
215 }
216 
217 static inline u32 from_hash(u32 id)
218 {
219 	id &= 0xFFFF;
220 	if (id == 0xFFFF)
221 		return 32;
222 	if (!(id & 0x8000)) {
223 		if (id > 255)
224 			return 256;
225 		return id&0xF;
226 	}
227 	return 16 + (id&0xF);
228 }
229 
/*
 * Look up a filter by handle for the netlink get/change path.
 * Returns the filter cast to unsigned long, or 0 when absent.
 */
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	/* to_hash() exceeds 256 only for malformed handles (low byte
	 * set together with the 0x8000 "to ANY" bit). */
	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	/* from_hash() reports 256 for an out-of-range FROM tag. */
	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
255 
/* Filters are not reference counted here; nothing to release. */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
259 
/* Nothing to set up: the head is allocated lazily in route4_change(). */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
264 
/* Unbind the bound class, destroy extensions and free one filter. */
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
272 
/*
 * Tear down the whole classifier: detach the head first (xchg makes
 * tp->root NULL for subsequent lookups), then free every filter and
 * every bucket, and finally the head itself.
 */
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	/* 256 "to" slots plus the wildcard bucket at index 256. */
	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			/* 16 FROM + 16 IIF chains + the wildcard chain. */
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
298 
/*
 * Unlink and free the filter behind arg.  If its bucket becomes empty
 * the bucket itself is released too.  The fastmap is flushed because
 * it may still cache a pointer to the dying filter.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			/* Unlink under the tree lock, then free. */
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
339 
/*
 * Validate the netlink attributes and fill in filter *f.  Handle
 * layout: low byte = TO tag (bit 15 is set instead when no TO was
 * given, the "to ANY" case); upper 16 bits = FROM tag, or iif|0x8000
 * for a device match, or 0xFFFF for the wildcard.  Allocates the
 * destination bucket on demand.  Returns 0 or a negative errno.
 */
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		/* A TO tag contradicts a user handle with the "to ANY" bit. */
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		/* FROM and IIF are mutually exclusive (see file header). */
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		/* Bits 8..14 are the only free bits a user-chosen handle
		 * may add; everything else must agree with the attributes
		 * decoded above. */
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;
		memset(b, 0, sizeof(*b));

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		/* Reject a handle already present in the target chain. */
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	/* Commit the new identity under the tree lock so readers never
	 * observe a half-updated filter. */
	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
440 
/*
 * Create a new filter, or modify the existing one passed back through
 * *arg.  Chains are kept sorted by handle; when a change moves a
 * filter to a new handle it is inserted at its new position and then
 * unlinked from the old chain under the tree lock.  Returns 0 or a
 * negative errno.
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		/* Change of an existing filter. */
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	/* First filter on this tp: allocate the head lazily. */
	err = -ENOBUFS;
	if (head == NULL) {
		head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;
		memset(head, 0, sizeof(struct route4_head));

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;
	memset(f, 0, sizeof(*f));

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Insert into the destination chain, keeping it handle-sorted. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	/* If the handle changed, drop the filter from its old chain. */
	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	/* The fastmap may cache stale results for this tag. */
	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
530 
531 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
532 {
533 	struct route4_head *head = tp->root;
534 	unsigned h, h1;
535 
536 	if (head == NULL)
537 		arg->stop = 1;
538 
539 	if (arg->stop)
540 		return;
541 
542 	for (h = 0; h <= 256; h++) {
543 		struct route4_bucket *b = head->table[h];
544 
545 		if (b) {
546 			for (h1 = 0; h1 <= 32; h1++) {
547 				struct route4_filter *f;
548 
549 				for (f = b->ht[h1]; f; f = f->next) {
550 					if (arg->count < arg->skip) {
551 						arg->count++;
552 						continue;
553 					}
554 					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
555 						arg->stop = 1;
556 						return;
557 					}
558 					arg->count++;
559 				}
560 			}
561 		}
562 	}
563 }
564 
/*
 * Dump one filter's attributes (TO, FROM or IIF, classid, extensions)
 * into a netlink message.  Returns skb->len on success; when the skb
 * runs out of room the partial write is trimmed and -1 returned.
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char	 *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	/* Bit 15 clear: the filter carries a TO tag in the low byte. */
	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	/* Bit 31 set: the upper half is iif|0x8000, or 0xFFFF for the
	 * wildcard, which emits neither FROM nor IIF. */
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	/* Backfill the nested attribute's total length. */
	rta->rta_len = skb->tail - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
609 
/* Operations table registered with the packet-scheduler core as the
 * "route" classifier. */
static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
624 
/* Module entry point: register the "route" classifier. */
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}
629 
/* Module exit: unregister the classifier. */
static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
634 
635 module_init(init_route4)
636 module_exit(exit_route4)
637 MODULE_LICENSE("GPL");
638