xref: /linux/net/sched/cls_u32.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
1 /*
2  * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
3  *
4  *		This program is free software; you can redistribute it and/or
5  *		modify it under the terms of the GNU General Public License
6  *		as published by the Free Software Foundation; either version
7  *		2 of the License, or (at your option) any later version.
8  *
9  * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10  *
11  *	The filters are packed into hash tables of key nodes,
12  *	each node carrying a set of 32bit key/mask pairs.
13  *	Nodes may reference next-level hash tables, and so on.
14  *
15  *	This scheme is the best universal classifier I managed to
16  *	invent; it is not super-fast, but it is not slow either (provided
17  *	you program it correctly), and it is general enough.  Its relative
18  *	speed grows as the number of rules becomes larger.
19  *
20  *	It seems to represent the best middle ground between speed
21  *	and manageability, both for humans and for machines.
22  *
23  *	It is especially useful for link sharing combined with QoS;
24  *	pure RSVP doesn't need such a general approach and can use
25  *	much simpler (and faster) schemes, such as cls_rsvp.c.
26  *
27  *	JHS: We should eventually remove CONFIG_NET_CLS_IND from here,
28  *	once the meta match extension is made available.
29  *
30  *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
31  */
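
/*
 *	Illustrative userspace usage (a rough sketch only; the device name,
 *	handles and addresses below are made-up examples, not anything
 *	defined in this file):
 *
 *	  # create a 256-bucket hash table
 *	  tc filter add dev eth0 parent 1:0 prio 5 handle 2: protocol ip \
 *		u32 divisor 256
 *	  # hash on the last byte of the IP source address and link to it
 *	  tc filter add dev eth0 parent 1:0 prio 5 protocol ip u32 \
 *		match ip src 10.0.0.0/8 hashkey mask 0x000000ff at 12 link 2:
 *	  # terminal rule in bucket 0x7f of table 2:
 *	  tc filter add dev eth0 parent 1:0 prio 5 protocol ip u32 \
 *		ht 2:7f: match ip src 10.0.0.127/32 flowid 1:10
 */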
32 
33 #include <asm/uaccess.h>
34 #include <asm/system.h>
35 #include <linux/bitops.h>
36 #include <linux/module.h>
37 #include <linux/types.h>
38 #include <linux/kernel.h>
39 #include <linux/string.h>
40 #include <linux/mm.h>
41 #include <linux/socket.h>
42 #include <linux/sockios.h>
43 #include <linux/in.h>
44 #include <linux/errno.h>
45 #include <linux/interrupt.h>
46 #include <linux/if_ether.h>
47 #include <linux/inet.h>
48 #include <linux/netdevice.h>
49 #include <linux/etherdevice.h>
50 #include <linux/notifier.h>
51 #include <linux/rtnetlink.h>
52 #include <net/ip.h>
53 #include <net/route.h>
54 #include <linux/skbuff.h>
55 #include <net/sock.h>
56 #include <net/act_api.h>
57 #include <net/pkt_cls.h>
58 
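/*
 * One installed filter ("key node"): a selector with an array of 32bit
 * key/mask pairs, the classification result, optional actions, and an
 * optional link (ht_down) to a lower-level hash table.  Nodes in the same
 * hash bucket are chained through ->next.
 */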
59 struct tc_u_knode
60 {
61 	struct tc_u_knode	*next;
62 	u32			handle;
63 	struct tc_u_hnode	*ht_up;
64 	struct tcf_exts		exts;
65 #ifdef CONFIG_NET_CLS_IND
66 	char                     indev[IFNAMSIZ];
67 #endif
68 	u8			fshift;
69 	struct tcf_result	res;
70 	struct tc_u_hnode	*ht_down;
71 #ifdef CONFIG_CLS_U32_PERF
72 	struct tc_u32_pcnt	*pf;
73 #endif
74 #ifdef CONFIG_CLS_U32_MARK
75 	struct tc_u32_mark	mark;
76 #endif
77 	struct tc_u32_sel	sel;
78 };
79 
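/*
 * One hash table ("hash node") with divisor+1 buckets; ht[] is really a
 * variable-length array allocated together with the structure.
 */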
80 struct tc_u_hnode
81 {
82 	struct tc_u_hnode	*next;
83 	u32			handle;
84 	u32			prio;
85 	struct tc_u_common	*tp_c;
86 	int			refcnt;
87 	unsigned		divisor;
88 	struct tc_u_knode	*ht[1];
89 };
90 
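/*
 * Per-qdisc state shared by all u32 classifier instances attached to the
 * same qdisc: the list of hash tables and the generator for new table ids.
 */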
91 struct tc_u_common
92 {
93 	struct tc_u_common	*next;
94 	struct tc_u_hnode	*hlist;
95 	struct Qdisc		*q;
96 	int			refcnt;
97 	u32			hgenerator;
98 };
99 
100 static struct tcf_ext_map u32_ext_map = {
101 	.action = TCA_U32_ACT,
102 	.police = TCA_U32_POLICE
103 };
104 
105 static struct tc_u_common *u32_list;
106 
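/*
 * Fold a 32bit word into a bucket index: mask with the selector's hmask and
 * shift right by fshift (the number of trailing zero bits in hmask,
 * precomputed when the filter is installed).  The caller still masks the
 * result with the table's divisor.
 */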
107 static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
108 {
109 	unsigned h = (key & sel->hmask)>>fshift;
110 
111 	return h;
112 }
113 
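/*
 * Main match loop.  Starting from the root hash table, compare each of the
 * node's key/mask pairs against 32bit words in the packet; a mismatch moves
 * on to the next node in the bucket.  On a full match, a terminal node
 * returns its result (after the optional indev check and the extensions),
 * while a node with ht_down pushes the current position on a small stack
 * and descends into the linked table, optionally adjusting the header
 * offset (TC_U32_OFFSET/VAROFFSET/EAT).  Popping the stack resumes at the
 * parent node's terminal check; the walk gives up after TC_U32_MAXDEPTH
 * nested tables.
 */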
114 static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
115 {
116 	struct {
117 		struct tc_u_knode *knode;
118 		u8		  *ptr;
119 	} stack[TC_U32_MAXDEPTH];
120 
121 	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
122 	u8 *ptr = skb->nh.raw;
123 	struct tc_u_knode *n;
124 	int sdepth = 0;
125 	int off2 = 0;
126 	int sel = 0;
127 #ifdef CONFIG_CLS_U32_PERF
128 	int j;
129 #endif
130 	int i, r;
131 
132 next_ht:
133 	n = ht->ht[sel];
134 
135 next_knode:
136 	if (n) {
137 		struct tc_u32_key *key = n->sel.keys;
138 
139 #ifdef CONFIG_CLS_U32_PERF
140 		n->pf->rcnt +=1;
141 		j = 0;
142 #endif
143 
144 #ifdef CONFIG_CLS_U32_MARK
145 		if ((skb->mark & n->mark.mask) != n->mark.val) {
146 			n = n->next;
147 			goto next_knode;
148 		} else {
149 			n->mark.success++;
150 		}
151 #endif
152 
153 		for (i = n->sel.nkeys; i>0; i--, key++) {
154 
155 			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
156 				n = n->next;
157 				goto next_knode;
158 			}
159 #ifdef CONFIG_CLS_U32_PERF
160 			n->pf->kcnts[j] +=1;
161 			j++;
162 #endif
163 		}
164 		if (n->ht_down == NULL) {
165 check_terminal:
166 			if (n->sel.flags&TC_U32_TERMINAL) {
167 
168 				*res = n->res;
169 #ifdef CONFIG_NET_CLS_IND
170 				if (!tcf_match_indev(skb, n->indev)) {
171 					n = n->next;
172 					goto next_knode;
173 				}
174 #endif
175 #ifdef CONFIG_CLS_U32_PERF
176 				n->pf->rhit +=1;
177 #endif
178 				r = tcf_exts_exec(skb, &n->exts, res);
179 				if (r < 0) {
180 					n = n->next;
181 					goto next_knode;
182 				}
183 
184 				return r;
185 			}
186 			n = n->next;
187 			goto next_knode;
188 		}
189 
190 		/* PUSH */
191 		if (sdepth >= TC_U32_MAXDEPTH)
192 			goto deadloop;
193 		stack[sdepth].knode = n;
194 		stack[sdepth].ptr = ptr;
195 		sdepth++;
196 
197 		ht = n->ht_down;
198 		sel = 0;
199 		if (ht->divisor)
200 			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel,n->fshift);
201 
202 		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
203 			goto next_ht;
204 
205 		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
206 			off2 = n->sel.off + 3;
207 			if (n->sel.flags&TC_U32_VAROFFSET)
208 				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
209 			off2 &= ~3;
210 		}
211 		if (n->sel.flags&TC_U32_EAT) {
212 			ptr += off2;
213 			off2 = 0;
214 		}
215 
216 		if (ptr < skb->tail)
217 			goto next_ht;
218 	}
219 
220 	/* POP */
221 	if (sdepth--) {
222 		n = stack[sdepth].knode;
223 		ht = n->ht_up;
224 		ptr = stack[sdepth].ptr;
225 		goto check_terminal;
226 	}
227 	return -1;
228 
229 deadloop:
230 	if (net_ratelimit())
231 		printk(KERN_WARNING "cls_u32: dead loop\n");
232 	return -1;
233 }
234 
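/* Find a hash table by handle in the per-qdisc list. */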
235 static __inline__ struct tc_u_hnode *
236 u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
237 {
238 	struct tc_u_hnode *ht;
239 
240 	for (ht = tp_c->hlist; ht; ht = ht->next)
241 		if (ht->handle == handle)
242 			break;
243 
244 	return ht;
245 }
246 
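/*
 * Find a key node by its full handle; the bucket is selected by
 * TC_U32_HASH(handle).
 */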
247 static __inline__ struct tc_u_knode *
248 u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
249 {
250 	unsigned sel;
251 	struct tc_u_knode *n = NULL;
252 
253 	sel = TC_U32_HASH(handle);
254 	if (sel > ht->divisor)
255 		goto out;
256 
257 	for (n = ht->ht[sel]; n; n = n->next)
258 		if (n->handle == handle)
259 			break;
260 out:
261 	return n;
262 }
263 
264 
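/*
 * Map a user-visible handle to an internal reference: the hash table itself
 * when the key part of the handle is zero, otherwise the matching key node
 * (0 if nothing is found).
 */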
265 static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
266 {
267 	struct tc_u_hnode *ht;
268 	struct tc_u_common *tp_c = tp->data;
269 
270 	if (TC_U32_HTID(handle) == TC_U32_ROOT)
271 		ht = tp->root;
272 	else
273 		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));
274 
275 	if (!ht)
276 		return 0;
277 
278 	if (TC_U32_KEY(handle) == 0)
279 		return (unsigned long)ht;
280 
281 	return (unsigned long)u32_lookup_key(ht, handle);
282 }
283 
284 static void u32_put(struct tcf_proto *tp, unsigned long f)
285 {
286 }
287 
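/*
 * Pick an unused hash table id.  Auto-generated ids always have 0x800 set
 * and live in the top 12 bits of the handle; returns 0 when no free id
 * could be found.
 */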
288 static u32 gen_new_htid(struct tc_u_common *tp_c)
289 {
290 	int i = 0x800;
291 
292 	do {
293 		if (++tp_c->hgenerator == 0x7FF)
294 			tp_c->hgenerator = 1;
295 	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));
296 
297 	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
298 }
299 
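/*
 * Create a new u32 classifier instance: find (or allocate) the tc_u_common
 * shared by all u32 instances on this qdisc and set up the instance's root
 * hash table, which has a single bucket (divisor 0).
 */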
300 static int u32_init(struct tcf_proto *tp)
301 {
302 	struct tc_u_hnode *root_ht;
303 	struct tc_u_common *tp_c;
304 
305 	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
306 		if (tp_c->q == tp->q)
307 			break;
308 
309 	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
310 	if (root_ht == NULL)
311 		return -ENOBUFS;
312 
313 	root_ht->divisor = 0;
314 	root_ht->refcnt++;
315 	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
316 	root_ht->prio = tp->prio;
317 
318 	if (tp_c == NULL) {
319 		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
320 		if (tp_c == NULL) {
321 			kfree(root_ht);
322 			return -ENOBUFS;
323 		}
324 		tp_c->q = tp->q;
325 		tp_c->next = u32_list;
326 		u32_list = tp_c;
327 	}
328 
329 	tp_c->refcnt++;
330 	root_ht->next = tp_c->hlist;
331 	tp_c->hlist = root_ht;
332 	root_ht->tp_c = tp_c;
333 
334 	tp->root = root_ht;
335 	tp->data = tp_c;
336 	return 0;
337 }
338 
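/*
 * Free one key node: unbind its class, destroy its extensions, drop the
 * reference it held on a linked lower table and release its memory.
 */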
339 static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
340 {
341 	tcf_unbind_filter(tp, &n->res);
342 	tcf_exts_destroy(tp, &n->exts);
343 	if (n->ht_down)
344 		n->ht_down->refcnt--;
345 #ifdef CONFIG_CLS_U32_PERF
346 	kfree(n->pf);
347 #endif
348 	kfree(n);
349 	return 0;
350 }
351 
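/* Unlink a key node from its bucket under the tree lock, then free it. */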
352 static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
353 {
354 	struct tc_u_knode **kp;
355 	struct tc_u_hnode *ht = key->ht_up;
356 
357 	if (ht) {
358 		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
359 			if (*kp == key) {
360 				tcf_tree_lock(tp);
361 				*kp = key->next;
362 				tcf_tree_unlock(tp);
363 
364 				u32_destroy_key(tp, key);
365 				return 0;
366 			}
367 		}
368 	}
369 	BUG_TRAP(0);
370 	return 0;
371 }
372 
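/* Free every key node in all buckets of a hash table. */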
373 static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
374 {
375 	struct tc_u_knode *n;
376 	unsigned h;
377 
378 	for (h=0; h<=ht->divisor; h++) {
379 		while ((n = ht->ht[h]) != NULL) {
380 			ht->ht[h] = n->next;
381 
382 			u32_destroy_key(tp, n);
383 		}
384 	}
385 }
386 
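/*
 * Empty a hash table, unlink it from the common list and free it; only
 * called once its reference count has dropped to zero.
 */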
387 static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
388 {
389 	struct tc_u_common *tp_c = tp->data;
390 	struct tc_u_hnode **hn;
391 
392 	BUG_TRAP(!ht->refcnt);
393 
394 	u32_clear_hnode(tp, ht);
395 
396 	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
397 		if (*hn == ht) {
398 			*hn = ht->next;
399 			kfree(ht);
400 			return 0;
401 		}
402 	}
403 
404 	BUG_TRAP(0);
405 	return -ENOENT;
406 }
407 
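/*
 * Destroy a whole classifier instance: drop the root hash table and, when
 * the last instance sharing the tc_u_common goes away, clear and free every
 * remaining hash table and the common block itself.
 */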
408 static void u32_destroy(struct tcf_proto *tp)
409 {
410 	struct tc_u_common *tp_c = tp->data;
411 	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);
412 
413 	BUG_TRAP(root_ht != NULL);
414 
415 	if (root_ht && --root_ht->refcnt == 0)
416 		u32_destroy_hnode(tp, root_ht);
417 
418 	if (--tp_c->refcnt == 0) {
419 		struct tc_u_hnode *ht;
420 		struct tc_u_common **tp_cp;
421 
422 		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
423 			if (*tp_cp == tp_c) {
424 				*tp_cp = tp_c->next;
425 				break;
426 			}
427 		}
428 
429 		for (ht=tp_c->hlist; ht; ht = ht->next)
430 			u32_clear_hnode(tp, ht);
431 
432 		while ((ht = tp_c->hlist) != NULL) {
433 			tp_c->hlist = ht->next;
434 
435 			BUG_TRAP(ht->refcnt == 0);
436 
437 			kfree(ht);
438 		}
439 
440 		kfree(tp_c);
441 	}
442 
443 	tp->data = NULL;
444 }
445 
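/*
 * Delete entry point: arg is either a key node (non-zero key part in its
 * handle) or a hash table.  The root table cannot be deleted; other tables
 * are only destroyed once their reference count drops to zero.
 */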
446 static int u32_delete(struct tcf_proto *tp, unsigned long arg)
447 {
448 	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;
449 
450 	if (ht == NULL)
451 		return 0;
452 
453 	if (TC_U32_KEY(ht->handle))
454 		return u32_delete_key(tp, (struct tc_u_knode*)ht);
455 
456 	if (tp->root == ht)
457 		return -EINVAL;
458 
459 	if (--ht->refcnt == 0)
460 		u32_destroy_hnode(tp, ht);
461 
462 	return 0;
463 }
464 
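/*
 * Choose a node id for a new key in the bucket selected by @handle: one
 * past the largest id already present, but at least 0x800 and clamped
 * to 0xFFF.
 */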
465 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
466 {
467 	struct tc_u_knode *n;
468 	unsigned i = 0x7FF;
469 
470 	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
471 		if (i < TC_U32_NODE(n->handle))
472 			i = TC_U32_NODE(n->handle);
473 	i++;
474 
475 	return handle|(i>0xFFF ? 0xFFF : i);
476 }
477 
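/*
 * Validate and apply the attributes shared by create and change: extensions
 * (actions/policing), an optional TCA_U32_LINK to another hash table, the
 * classid binding and, when enabled, the input device match.
 */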
478 static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
479 			 struct tc_u_hnode *ht,
480 			 struct tc_u_knode *n, struct rtattr **tb,
481 			 struct rtattr *est)
482 {
483 	int err;
484 	struct tcf_exts e;
485 
486 	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
487 	if (err < 0)
488 		return err;
489 
490 	err = -EINVAL;
491 	if (tb[TCA_U32_LINK-1]) {
492 		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
493 		struct tc_u_hnode *ht_down = NULL;
494 
495 		if (TC_U32_KEY(handle))
496 			goto errout;
497 
498 		if (handle) {
499 			ht_down = u32_lookup_ht(ht->tp_c, handle);
500 
501 			if (ht_down == NULL)
502 				goto errout;
503 			ht_down->refcnt++;
504 		}
505 
506 		tcf_tree_lock(tp);
507 		ht_down = xchg(&n->ht_down, ht_down);
508 		tcf_tree_unlock(tp);
509 
510 		if (ht_down)
511 			ht_down->refcnt--;
512 	}
513 	if (tb[TCA_U32_CLASSID-1]) {
514 		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
515 		tcf_bind_filter(tp, &n->res, base);
516 	}
517 
518 #ifdef CONFIG_NET_CLS_IND
519 	if (tb[TCA_U32_INDEV-1]) {
520 		int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
521 		if (err < 0)
522 			goto errout;
523 	}
524 #endif
525 	tcf_exts_change(tp, &n->exts, &e);
526 
527 	return 0;
528 errout:
529 	tcf_exts_destroy(tp, &e);
530 	return err;
531 }
532 
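/*
 * Create or change a filter.  An existing key node only has its parameters
 * updated.  TCA_U32_DIVISOR creates a new hash table instead.  Otherwise a
 * new key node is allocated, its selector is copied in, fshift is derived
 * from the hash mask, and the node is inserted into its bucket in ascending
 * node-id order.
 */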
533 static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
534 		      struct rtattr **tca,
535 		      unsigned long *arg)
536 {
537 	struct tc_u_common *tp_c = tp->data;
538 	struct tc_u_hnode *ht;
539 	struct tc_u_knode *n;
540 	struct tc_u32_sel *s;
541 	struct rtattr *opt = tca[TCA_OPTIONS-1];
542 	struct rtattr *tb[TCA_U32_MAX];
543 	u32 htid;
544 	int err;
545 
546 	if (opt == NULL)
547 		return handle ? -EINVAL : 0;
548 
549 	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
550 		return -EINVAL;
551 
552 	if ((n = (struct tc_u_knode*)*arg) != NULL) {
553 		if (TC_U32_KEY(n->handle) == 0)
554 			return -EINVAL;
555 
556 		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
557 	}
558 
559 	if (tb[TCA_U32_DIVISOR-1]) {
560 		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);
561 
562 		if (--divisor > 0x100)
563 			return -EINVAL;
564 		if (TC_U32_KEY(handle))
565 			return -EINVAL;
566 		if (handle == 0) {
567 			handle = gen_new_htid(tp->data);
568 			if (handle == 0)
569 				return -ENOMEM;
570 		}
571 		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
572 		if (ht == NULL)
573 			return -ENOBUFS;
574 		ht->tp_c = tp_c;
575 		ht->refcnt = 0;
576 		ht->divisor = divisor;
577 		ht->handle = handle;
578 		ht->prio = tp->prio;
579 		ht->next = tp_c->hlist;
580 		tp_c->hlist = ht;
581 		*arg = (unsigned long)ht;
582 		return 0;
583 	}
584 
585 	if (tb[TCA_U32_HASH-1]) {
586 		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
587 		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
588 			ht = tp->root;
589 			htid = ht->handle;
590 		} else {
591 			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
592 			if (ht == NULL)
593 				return -EINVAL;
594 		}
595 	} else {
596 		ht = tp->root;
597 		htid = ht->handle;
598 	}
599 
600 	if (ht->divisor < TC_U32_HASH(htid))
601 		return -EINVAL;
602 
603 	if (handle) {
604 		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
605 			return -EINVAL;
606 		handle = htid | TC_U32_NODE(handle);
607 	} else
608 		handle = gen_new_kid(ht, htid);
609 
610 	if (tb[TCA_U32_SEL-1] == 0 ||
611 	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
612 		return -EINVAL;
613 
614 	s = RTA_DATA(tb[TCA_U32_SEL-1]);
615 
616 	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
617 	if (n == NULL)
618 		return -ENOBUFS;
619 
620 #ifdef CONFIG_CLS_U32_PERF
621 	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
622 	if (n->pf == NULL) {
623 		kfree(n);
624 		return -ENOBUFS;
625 	}
626 #endif
627 
628 	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
629 	n->ht_up = ht;
630 	n->handle = handle;
631 {
632 	u8 i = 0;
633 	u32 mask = s->hmask;
634 	if (mask) {
635 		while (!(mask & 1)) {
636 			i++;
637 			mask>>=1;
638 		}
639 	}
640 	n->fshift = i;
641 }
642 
643 #ifdef CONFIG_CLS_U32_MARK
644 	if (tb[TCA_U32_MARK-1]) {
645 		struct tc_u32_mark *mark;
646 
647 		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
648 #ifdef CONFIG_CLS_U32_PERF
649 			kfree(n->pf);
650 #endif
651 			kfree(n);
652 			return -EINVAL;
653 		}
654 		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
655 		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
656 		n->mark.success = 0;
657 	}
658 #endif
659 
660 	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
661 	if (err == 0) {
662 		struct tc_u_knode **ins;
663 		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
664 			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
665 				break;
666 
667 		n->next = *ins;
668 		wmb();
669 		*ins = n;
670 
671 		*arg = (unsigned long)n;
672 		return 0;
673 	}
674 #ifdef CONFIG_CLS_U32_PERF
675 	kfree(n->pf);
676 #endif
677 	kfree(n);
678 	return err;
679 }
680 
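/*
 * Walk every hash table of this instance's priority and every key node in
 * them, honouring the walker's skip/count/stop protocol.
 */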
681 static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
682 {
683 	struct tc_u_common *tp_c = tp->data;
684 	struct tc_u_hnode *ht;
685 	struct tc_u_knode *n;
686 	unsigned h;
687 
688 	if (arg->stop)
689 		return;
690 
691 	for (ht = tp_c->hlist; ht; ht = ht->next) {
692 		if (ht->prio != tp->prio)
693 			continue;
694 		if (arg->count >= arg->skip) {
695 			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
696 				arg->stop = 1;
697 				return;
698 			}
699 		}
700 		arg->count++;
701 		for (h = 0; h <= ht->divisor; h++) {
702 			for (n = ht->ht[h]; n; n = n->next) {
703 				if (arg->count < arg->skip) {
704 					arg->count++;
705 					continue;
706 				}
707 				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
708 					arg->stop = 1;
709 					return;
710 				}
711 				arg->count++;
712 			}
713 		}
714 	}
715 }
716 
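/*
 * Dump one hash table (just its divisor) or one key node (selector, hash
 * table id, classid, link, mark, extensions and, when configured, indev and
 * performance counters) into a netlink message.
 */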
717 static int u32_dump(struct tcf_proto *tp, unsigned long fh,
718 		     struct sk_buff *skb, struct tcmsg *t)
719 {
720 	struct tc_u_knode *n = (struct tc_u_knode*)fh;
721 	unsigned char	 *b = skb->tail;
722 	struct rtattr *rta;
723 
724 	if (n == NULL)
725 		return skb->len;
726 
727 	t->tcm_handle = n->handle;
728 
729 	rta = (struct rtattr*)b;
730 	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
731 
732 	if (TC_U32_KEY(n->handle) == 0) {
733 		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
734 		u32 divisor = ht->divisor+1;
735 		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
736 	} else {
737 		RTA_PUT(skb, TCA_U32_SEL,
738 			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
739 			&n->sel);
740 		if (n->ht_up) {
741 			u32 htid = n->handle & 0xFFFFF000;
742 			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
743 		}
744 		if (n->res.classid)
745 			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
746 		if (n->ht_down)
747 			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);
748 
749 #ifdef CONFIG_CLS_U32_MARK
750 		if (n->mark.val || n->mark.mask)
751 			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
752 #endif
753 
754 		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
755 			goto rtattr_failure;
756 
757 #ifdef CONFIG_NET_CLS_IND
758 		if (strlen(n->indev))
759 			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
760 #endif
761 #ifdef CONFIG_CLS_U32_PERF
762 		RTA_PUT(skb, TCA_U32_PCNT,
763 			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
764 			n->pf);
765 #endif
766 	}
767 
768 	rta->rta_len = skb->tail - b;
769 	if (TC_U32_KEY(n->handle))
770 		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
771 			goto rtattr_failure;
772 	return skb->len;
773 
774 rtattr_failure:
775 	skb_trim(skb, b - skb->data);
776 	return -1;
777 }
778 
779 static struct tcf_proto_ops cls_u32_ops = {
780 	.next		=	NULL,
781 	.kind		=	"u32",
782 	.classify	=	u32_classify,
783 	.init		=	u32_init,
784 	.destroy	=	u32_destroy,
785 	.get		=	u32_get,
786 	.put		=	u32_put,
787 	.change		=	u32_change,
788 	.delete		=	u32_delete,
789 	.walk		=	u32_walk,
790 	.dump		=	u32_dump,
791 	.owner		=	THIS_MODULE,
792 };
793 
794 static int __init init_u32(void)
795 {
796 	printk("u32 classifier\n");
797 #ifdef CONFIG_CLS_U32_PERF
798 	printk("    Performance counters on\n");
799 #endif
800 #ifdef CONFIG_NET_CLS_POLICE
801 	printk("    OLD policer on \n");
802 #endif
803 #ifdef CONFIG_NET_CLS_IND
804 	printk("    input device check on \n");
805 #endif
806 #ifdef CONFIG_NET_CLS_ACT
807 	printk("    Actions configured \n");
808 #endif
809 	return register_tcf_proto_ops(&cls_u32_ops);
810 }
811 
812 static void __exit exit_u32(void)
813 {
814 	unregister_tcf_proto_ops(&cls_u32_ops);
815 }
816 
817 module_init(init_u32)
818 module_exit(exit_u32)
819 MODULE_LICENSE("GPL");
820