xref: /linux/net/sched/cls_flow.c (revision 8e07e0e3964ca4e23ce7b68e2096fe660a888942)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 */
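
/*
 * Example usage (a sketch of the iproute2 "flow" filter syntax; see
 * tc-flow(8) for the exact options of the installed tc version):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 \
 *	flow hash keys src,dst,proto divisor 1024 perturb 10
 *
 * This hashes the dissected keys into 1024 buckets below class 1: and
 * re-seeds the hash every 10 seconds.
 */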

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/inet_sock.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/flow_dissector.h>
#include <net/tc_wrapper.h>

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
	struct rcu_head		rcu;
};

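/*
 * One filter instance: the selected keys, the hash/map parameters and,
 * in hash mode, the timer that periodically re-seeds hashrnd.
 */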
struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct timer_list	perturb_timer;
	u32			perturb_period;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
	u32			hashrnd;
	struct rcu_work		rwork;
};

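/* Fold a kernel pointer into a u32 so it can serve as a fallback key. */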
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 src = flow_get_u32_src(flow);

	if (src)
		return ntohl(src);

	return addr_fold(skb->sk);
}

static u32 flow_get_dst(const struct sk_buff *skb, const struct flow_keys *flow)
{
	__be32 dst = flow_get_u32_dst(flow);

	if (dst)
		return ntohl(dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_proto(const struct sk_buff *skb,
			  const struct flow_keys *flow)
{
	return flow->basic.ip_proto;
}

static u32 flow_get_proto_src(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.src);

	return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(const struct sk_buff *skb,
			      const struct flow_keys *flow)
{
	if (flow->ports.ports)
		return ntohs(flow->ports.dst);

	return addr_fold(skb_dst(skb)) ^ (__force u16)skb_protocol(skb, true);
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	return addr_fold(skb_nfct(skb));
#else
	return 0;
#endif
}

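/*
 * Fetch a member of the conntrack tuple for the packet's direction.
 * Jumps to the caller's local "fallback" label when no conntrack entry
 * is attached (or conntrack is compiled out).
 */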
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);		\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

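/* Conntrack-based keys fall back to the plain flow keys when unavailable. */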
static u32 flow_get_nfct_src(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb, flow);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb,
			     const struct flow_keys *flow)
{
	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb, flow);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb, flow);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb,
				   const struct flow_keys *flow)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb, flow);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (skb_dst(skb))
		return skb_dst(skb)->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kuid_t skuid = sk->sk_socket->file->f_cred->fsuid;

		return from_kuid(&init_user_ns, skuid);
	}
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	struct sock *sk = skb_to_full_sk(skb);

	if (sk && sk->sk_socket && sk->sk_socket->file) {
		kgid_t skgid = sk->sk_socket->file->f_cred->fsgid;

		return from_kgid(&init_user_ns, skgid);
	}
	return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
	u16 tag;

	if (vlan_get_tag(skb, &tag) < 0)
		return 0;
	return tag & VLAN_VID_MASK;
}

static u32 flow_get_rxhash(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}

static u32 flow_key_get(struct sk_buff *skb, int key, struct flow_keys *flow)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb, flow);
	case FLOW_KEY_DST:
		return flow_get_dst(skb, flow);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb, flow);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb, flow);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb, flow);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb, flow);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb, flow);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb, flow);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb, flow);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	case FLOW_KEY_VLAN_TAG:
		return flow_get_vlan_tag(skb);
	case FLOW_KEY_RXHASH:
		return flow_get_rxhash(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

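/* Keys that require a flow dissector pass before they can be extracted. */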
#define FLOW_KEYS_NEEDED ((1 << FLOW_KEY_SRC) |		\
			  (1 << FLOW_KEY_DST) |			\
			  (1 << FLOW_KEY_PROTO) |		\
			  (1 << FLOW_KEY_PROTO_SRC) |		\
			  (1 << FLOW_KEY_PROTO_DST) |		\
			  (1 << FLOW_KEY_NFCT_SRC) |		\
			  (1 << FLOW_KEY_NFCT_DST) |		\
			  (1 << FLOW_KEY_NFCT_PROTO_SRC) |	\
			  (1 << FLOW_KEY_NFCT_PROTO_DST))

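/*
 * Classify one packet: for each filter, check the ematches, extract the
 * configured keys, then either jhash them (FLOW_MODE_HASH) or run the
 * single key through mask/xor/rshift/addend (FLOW_MODE_MAP).  The result,
 * optionally reduced modulo divisor, is added to baseclass to form the
 * returned classid.
 */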
TC_INDIRECT_SCOPE int flow_classify(struct sk_buff *skb,
				    const struct tcf_proto *tp,
				    struct tcf_result *res)
{
	struct flow_head *head = rcu_dereference_bh(tp->root);
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry_rcu(f, &head->filters, list) {
		u32 keys[FLOW_KEY_MAX + 1];
		struct flow_keys flow_keys;

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;
		if (keymask & FLOW_KEYS_NEEDED)
			skb_flow_dissect_flow_keys(skb, &flow_keys, 0);

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key, &flow_keys);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, f->hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

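/* Timer callback: re-seed the hash so flows migrate between buckets. */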
static void flow_perturbation(struct timer_list *t)
{
	struct flow_filter *f = from_timer(f, t, perturb_timer);

	get_random_bytes(&f->hashrnd, 4);
	if (f->perturb_period)
		mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
	[TCA_FLOW_PERTURB]	= { .type = NLA_U32 },
};

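/*
 * Final teardown: stop the perturbation timer and release the actions
 * and ematches.  Runs with RTNL held.
 */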
static void __flow_destroy_filter(struct flow_filter *f)
{
	timer_shutdown_sync(&f->perturb_timer);
	tcf_exts_destroy(&f->exts);
	tcf_em_tree_destroy(&f->ematches);
	tcf_exts_put_net(&f->exts);
	kfree(f);
}

static void flow_destroy_filter_work(struct work_struct *work)
{
	struct flow_filter *f = container_of(to_rcu_work(work),
					     struct flow_filter,
					     rwork);
	rtnl_lock();
	__flow_destroy_filter(f);
	rtnl_unlock();
}

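/*
 * Create or replace a filter.  Attributes are validated first; on replace,
 * the old filter's parameters are copied into the new instance before the
 * requested changes are applied, and the old one is unlinked with RCU and
 * freed from a workqueue.
 */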
static int flow_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *fold, *fnew;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	unsigned int nkeys = 0;
	unsigned int perturb_period = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FLOW_MAX, opt, flow_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;

		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		if ((keymask & (FLOW_KEY_SKUID|FLOW_KEY_SKGID)) &&
		    sk_user_ns(NETLINK_CB(in_skb).sk) != &init_user_ns)
			return -EOPNOTSUPP;
	}

	fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
	if (!fnew)
		return -ENOBUFS;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &fnew->ematches);
	if (err < 0)
		goto err1;

	err = tcf_exts_init(&fnew->exts, net, TCA_FLOW_ACT, TCA_FLOW_POLICE);
	if (err < 0)
		goto err2;

	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &fnew->exts, flags,
				extack);
	if (err < 0)
		goto err2;

	fold = *arg;
	if (fold) {
		err = -EINVAL;
		if (fold->handle != handle && handle)
			goto err2;

		/* Copy fold into fnew */
		fnew->tp = fold->tp;
		fnew->handle = fold->handle;
		fnew->nkeys = fold->nkeys;
		fnew->keymask = fold->keymask;
		fnew->mode = fold->mode;
		fnew->mask = fold->mask;
		fnew->xor = fold->xor;
		fnew->rshift = fold->rshift;
		fnew->addend = fold->addend;
		fnew->divisor = fold->divisor;
		fnew->baseclass = fold->baseclass;
		fnew->hashrnd = fold->hashrnd;

		mode = fold->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (mode == FLOW_MODE_HASH)
			perturb_period = fold->perturb_period;
		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (tb[TCA_FLOW_PERTURB]) {
			if (mode != FLOW_MODE_HASH)
				goto err2;
			perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
		}

		if (TC_H_MAJ(baseclass) == 0) {
			struct Qdisc *q = tcf_block_q(tp->chain->block);

			baseclass = TC_H_MAKE(q->handle, baseclass);
		}
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		fnew->handle = handle;
		fnew->mask  = ~0U;
		fnew->tp = tp;
		get_random_bytes(&fnew->hashrnd, 4);
	}

	timer_setup(&fnew->perturb_timer, flow_perturbation, TIMER_DEFERRABLE);

	tcf_block_netif_keep_dst(tp->chain->block);

	if (tb[TCA_FLOW_KEYS]) {
		fnew->keymask = keymask;
		fnew->nkeys   = nkeys;
	}

	fnew->mode = mode;

	if (tb[TCA_FLOW_MASK])
		fnew->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		fnew->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		fnew->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		fnew->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		fnew->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		fnew->baseclass = baseclass;

	fnew->perturb_period = perturb_period;
	if (perturb_period)
		mod_timer(&fnew->perturb_timer, jiffies + perturb_period);

	if (!*arg)
		list_add_tail_rcu(&fnew->list, &head->filters);
	else
		list_replace_rcu(&fold->list, &fnew->list);

	*arg = fnew;

	if (fold) {
		tcf_exts_get_net(&fold->exts);
		tcf_queue_work(&fold->rwork, flow_destroy_filter_work);
	}
	return 0;

err2:
	tcf_exts_destroy(&fnew->exts);
	tcf_em_tree_destroy(&fnew->ematches);
err1:
	kfree(fnew);
	return err;
}

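/* Unlink one filter; actual freeing is deferred past an RCU grace period. */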
static int flow_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f = arg;

	list_del_rcu(&f->list);
	tcf_exts_get_net(&f->exts);
	tcf_queue_work(&f->rwork, flow_destroy_filter_work);
	*last = list_empty(&head->filters);
	return 0;
}

static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	rcu_assign_pointer(tp->root, head);
	return 0;
}

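/*
 * Tear down the whole classifier instance: unlink every filter, deferring
 * the free to the workqueue when a netns reference could be held.
 */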
static void flow_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del_rcu(&f->list);
		if (tcf_exts_get_net(&f->exts))
			tcf_queue_work(&f->rwork, flow_destroy_filter_work);
		else
			__flow_destroy_filter(f);
	}
	kfree_rcu(head, rcu);
}

static void *flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return f;
	return NULL;
}

static int flow_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct flow_filter *f = fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
	    nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
		goto nla_put_failure;

	if (f->mask != ~0 || f->xor != 0) {
		if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
		    nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
			goto nla_put_failure;
	}
	if (f->rshift &&
	    nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
		goto nla_put_failure;
	if (f->addend &&
	    nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
		goto nla_put_failure;

	if (f->divisor &&
	    nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
		goto nla_put_failure;
	if (f->baseclass &&
	    nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
		goto nla_put_failure;

	if (f->perturb_period &&
	    nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts) < 0)
		goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;
#endif
	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg,
		      bool rtnl_held)
{
	struct flow_head *head = rtnl_dereference(tp->root);
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (!tc_cls_stats_dump(tp, arg, f))
			break;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");