/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */
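
/*
 * The flow classifier maps packets to class IDs by hashing or mapping a
 * configurable set of flow keys.  An illustrative setup (device name and
 * class layout are assumptions; see tc-flow(8)), spreading flows across
 * 1024 classes by address pair and re-seeding the hash every 10 seconds:
 *
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *		flow hash keys src,dst divisor 1024 perturb 10
 */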

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
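
/* Per-instance state: the list of installed filters, reclaimed via RCU. */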
struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};
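
/*
 * A single filter.  @keymask is a bitmap of FLOW_KEY_* values selecting the
 * keys fed into the classification; @nkeys is its popcount.  In
 * FLOW_MODE_HASH, jhash2() over the keys is seeded by @hashrnd, which
 * @perturb_timer re-randomizes every @perturb_period jiffies.  In
 * FLOW_MODE_MAP, the single key is transformed by @mask, @xor, @rshift and
 * @addend.  The result is reduced modulo @divisor (if set) and offset into
 * @baseclass.
 */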
struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	union {
		struct work_struct	work;
		struct rcu_head		rcu;
	};
};
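
/* Fold a kernel pointer into 32 bits for use as a fallback flow key. */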
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16) tc_skb_protocol(skb);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}
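
/*
 * Fetch @member from the conntrack tuple of @skb's connection.  On an
 * untracked packet (or with conntrack compiled out) this jumps to the
 * caller's "fallback:" label so the plain, non-conntrack key is used.
 */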
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (tc_skb_protocol(skb)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 uninitialized_var(tag);

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}
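
/* Keys that can only be computed from a full flow dissection of the skb. */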
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))
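
/*
 * Match @skb against each filter in list order.  As a worked example of
 * FLOW_MODE_MAP (values made up): with mask 0xff, xor 0x0f, rshift 2 and
 * addend 1, a key of 0x1234 yields
 * (((0x1234 & 0xff) ^ 0x0f) >> 2) + 1 = (0x3b >> 2) + 1 = 15.
 */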
static int flow_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH) {
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		} else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}
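
/* Timer callback: re-seed the hash so flows migrate between classes. */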
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};
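
/*
 * Teardown is deferred twice: first an RCU grace period, so concurrent
 * readers of the filter list drain, then a workqueue item, because the
 * final cleanup sleeps (del_timer_sync()) and must hold the RTNL.
 */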
static void __flow_destroy_filter(struct flow_filter *f)
{
	del_timer_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(work, struct flow_filter, work);

	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}

static void flow_destroy_filter(struct rcu_head *head)
{
	struct flow_filter *f = container_of(head, struct flow_filter, rcu);

	INIT_WORK(&f->work, flow_destroy_filter_work);
	tcf_queue_work(&f->work);
}
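
/*
 * Create a new filter, or replace the one passed in via @arg.  Replacement
 * builds a complete new flow_filter and swaps it into the list under RCU;
 * the old entry is freed through the deferred path above.
 */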
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		/* FLOW_KEY_SKUID/SKGID are key indices, not masks */
		if ((keymask & ((1 << FLOW_KEY_SKUID) |
				(1 << FLOW_KEY_SKGID))) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, ovr);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	netif_keep_dst(qdisc_dev(tp->q));

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys   = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		call_rcu(&fold->rcu, flow_destroy_filter);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}
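
/* Unlink one filter; freeing is deferred through RCU plus the workqueue. */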
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	call_rcu(&f->rcu, flow_destroy_filter);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			call_rcu(&f->rcu, flow_destroy_filter);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}
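
/*
 * Dump a filter's configuration to netlink, skipping attributes that are
 * still at their defaults (mask ~0, xor/rshift/addend/divisor/baseclass 0).
 */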
static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");