/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

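/*
 * Illustrative usage (device name and qdisc handle are placeholders):
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst,proto,proto-src,proto-dst \
 *		divisor 1024 perturb 10
 *
 * This hashes each packet's 5-tuple into 1024 classes under qdisc 1:,
 * re-randomizing the hash seed every 10 seconds.
 */
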
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
};

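/*
 * One instance per filter. keymask is a bitmask of FLOW_KEY_* values and
 * nkeys caches its popcount. In FLOW_MODE_MAP the single key becomes
 * (((key & mask) ^ xor) >> rshift) + addend; in FLOW_MODE_HASH the keys
 * are jhashed with the seed hashrnd, which perturb_timer re-randomizes
 * every perturb_period jiffies. A non-zero divisor reduces the result
 * modulo divisor, and the class id is offset from baseclass.
 */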
struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
};

static const struct tcf_ext_map flow_ext_map = {
	.action	= TCA_FLOW_ACT,
	.police	= TCA_FLOW_POLICE,
};

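/* Fold a kernel pointer into 32 bits for use as a fallback key. */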
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

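/*
 * Address keys: the full address for IPv4, the low 32 bits of the
 * address for IPv6. Without a parseable header, fall back to the folded
 * socket pointer (src) or dst_entry pointer (dst).
 */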
static u32 flow_get_src(const struct sk_buff *skb, int nhoff)
{
	__be32 *data = NULL, hdata;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		data = skb_header_pointer(skb,
					  nhoff + offsetof(struct iphdr,
							   saddr),
					  4, &hdata);
		break;
	case htons(ETH_P_IPV6):
		data = skb_header_pointer(skb,
					 nhoff + offsetof(struct ipv6hdr,
							  saddr.s6_addr32[3]),
					 4, &hdata);
		break;
	}

	if (data)
		return ntohl(*data);
	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, int nhoff)
{
	__be32 *data = NULL, hdata;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		data = skb_header_pointer(skb,
					  nhoff + offsetof(struct iphdr,
							   daddr),
					  4, &hdata);
		break;
	case htons(ETH_P_IPV6):
		data = skb_header_pointer(skb,
					 nhoff + offsetof(struct ipv6hdr,
							  daddr.s6_addr32[3]),
					 4, &hdata);
		break;
	}

	if (data)
		return ntohl(*data);
	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_proto(const struct sk_buff *skb, int nhoff)
{
	__u8 *data = NULL, hdata;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		data = skb_header_pointer(skb,
					  nhoff + offsetof(struct iphdr,
							   protocol),
					  1, &hdata);
		break;
	case htons(ETH_P_IPV6):
		data = skb_header_pointer(skb,
					 nhoff + offsetof(struct ipv6hdr,
							  nexthdr),
					 1, &hdata);
		break;
	}
	if (data)
		return *data;
	return 0;
}

/* Helper to fetch either the source or destination port; @dst is the
 * byte offset of the port field within the transport header (0 for the
 * source port, 2 for the destination port).
 */
static __be16 *flow_get_proto_common(const struct sk_buff *skb, int nhoff,
				     __be16 *_port, int dst)
{
	__be16 *port = NULL;
	int poff;

	switch (skb->protocol) {
	case htons(ETH_P_IP): {
		struct iphdr *iph, _iph;

		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			break;
		if (ip_is_fragment(iph))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0)
			port = skb_header_pointer(skb,
					nhoff + iph->ihl * 4 + poff + dst,
					sizeof(*_port), _port);
		break;
	}
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *iph, _iph;

		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			break;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0)
			port = skb_header_pointer(skb,
					nhoff + sizeof(*iph) + poff + dst,
					sizeof(*_port), _port);
		break;
	}
	}

	return port;
}

static u32 flow_get_proto_src(const struct sk_buff *skb, int nhoff)
{
	__be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 0);

	if (port)
		return ntohs(*port);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb, int nhoff)
{
	__be16 _port, *port = flow_get_proto_common(skb, nhoff, &_port, 2);

	if (port)
		return ntohs(*port);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

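/*
 * CTTUPLE() evaluates to the given member of the conntrack tuple for the
 * packet's direction; when no conntrack entry is attached (or conntrack
 * is compiled out) it jumps to a local "fallback" label, which every
 * caller must provide.
 */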
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb, int nhoff)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, nhoff);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb, int nhoff)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, nhoff);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb, int nhoff)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, nhoff);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb, int nhoff)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, nhoff);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsuid;
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_cred->fsgid;
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_rxhash(skb);
}

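/* Dispatch a single FLOW_KEY_* selector to its extractor. */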
static u32 flow_key_get(struct sk_buff *skb, int key)
{
	int nhoff = skb_network_offset(skb);

	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, nhoff);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, nhoff);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, nhoff);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, nhoff);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, nhoff);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, nhoff);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, nhoff);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, nhoff);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, nhoff);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

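/*
 * Classify: for each filter whose ematches accept the packet, gather the
 * configured keys, then derive a class id either by hashing them
 * (FLOW_MODE_HASH) or by transforming the single key with
 * mask/xor/rshift/addend (FLOW_MODE_MAP), reduced modulo divisor and
 * offset from baseclass.
 */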
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[f->nkeys];

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

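/* Timer callback: pick a fresh hash seed and re-arm if still periodic. */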
static void flow_perturbation(unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

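/*
 * Create a new filter or update an existing one (*arg != 0) from the
 * parsed netlink attributes. New filters require a handle and a key set;
 * more than one key, and hash perturbation, are only valid in
 * FLOW_MODE_HASH.
 */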
static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = f->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;

		get_random_bytes(&f->hashrnd, 4);
		f->perturb_timer.function = flow_perturbation;
		f->perturb_timer.data = (unsigned long)f;
		init_timer_deferrable(&f->perturb_timer);
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	f->perturb_period = perturb_period;
	del_timer(&f->perturb_timer);
	if (perturb_period)
		mod_timer(&f->perturb_timer, jiffies + perturb_period);

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}

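/* Dump a filter's configuration back to userspace as netlink attributes. */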
static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

	if (f->mask != ~0 || f->xor != 0) {
		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
	}
	if (f->rshift)
		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
	if (f->addend)
		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

	if (f->divisor)
		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
	if (f->baseclass)
		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

	if (f->perturb_period)
		NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

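/* Iterate over all filters for a netlink dump, honouring skip/count. */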
static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");