xref: /linux/include/net/pkt_cls.h (revision 995231c820e3bd3633cb38bf4ea6f2541e1da331)
1 #ifndef __NET_PKT_CLS_H
2 #define __NET_PKT_CLS_H
3 
4 #include <linux/pkt_cls.h>
5 #include <net/sch_generic.h>
6 #include <net/act_api.h>
7 
8 /* Basic packet classifier frontend definitions. */
9 
/* Iteration state handed to classifier ->walk() style callbacks.
 * @fn is invoked per filter node; the int fields carry walk
 * bookkeeping (skip/count/stop) maintained by the walker users.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
16 
/* Register/unregister a classifier type (tcf_proto_ops) with the core. */
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
19 
/* Identifies where a tcf_block is bound within its qdisc. */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

/* Extended argument for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
};

struct tcf_block_cb;
31 
32 #ifdef CONFIG_NET_CLS
/* Chain and block setup/teardown; the _ext variants additionally take
 * a tcf_block_ext_info describing the binding.
 */
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
int tcf_block_get_ext(struct tcf_block **p_block,
		      struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		      struct tcf_block_ext_info *ei);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
45 
/* Return the qdisc the block is attached to. */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}
50 
/* Return the net_device behind the qdisc the block is attached to. */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}
55 
/* Offload callback (tcf_block_cb) management on a block, plus the
 * packet classification entry point.  The __-prefixed variants skip
 * part of the bookkeeping done by their plain counterparts.
 */
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv);
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
73 
74 #else
/* Stubs for kernels built without CONFIG_NET_CLS: block setup and
 * callback registration become no-ops that report success, lookups
 * return NULL, and classification never matches.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block,
		      struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

/* NOTE(review): the next two stubs have no counterpart declared in the
 * CONFIG_NET_CLS branch above — confirm they are still referenced.
 */
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

/* No classifiers compiled in: every packet stays unclassified. */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
180 #endif
181 
/* Atomically exchange the class bound at *clp for @cl and return the
 * previous value.  Lockless; callers needing qdisc-tree consistency
 * use cls_set_class() below.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
187 
/* Same as __cls_set_class() but performed under the qdisc tree lock,
 * serializing against concurrent tree changes.
 */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}
198 
/* Bind a filter result to the class identified by r->classid: acquire
 * the new class binding first, swap it in under the tree lock, then
 * release the previously bound class (if any).
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
215 
216 static inline void
217 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
218 {
219 	struct Qdisc *q = tp->chain->block->q;
220 	unsigned long cl;
221 
222 	if (!q)
223 		return;
224 	if ((cl = __cls_set_class(&r->class, 0)) != 0)
225 		q->ops->cl_ops->unbind_tcf(q, cl);
226 }
227 
/* Container for the actions ("extensions") attached to a filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* valid entries in @actions */
	struct tc_action **actions;	/* TCA_ACT_MAX_PRIO slots, see tcf_exts_init() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
240 
/* Initialize @exts with the classifier-specific TLV ids @action and
 * @police.  With CONFIG_NET_CLS_ACT, also allocate the action pointer
 * array (TCA_ACT_MAX_PRIO slots).  Returns 0 on success, -ENOMEM if
 * the allocation fails.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
255 
/* Append every action of @exts to the list @actions, preserving order.
 * No-op when built without CONFIG_NET_CLS_ACT.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}
269 
/* Fold hardware-reported counters into every action attached to @exts.
 * preempt_disable() pins the update to one CPU for the duration —
 * presumably because tcf_action_stats_update() touches per-cpu
 * counters; confirm against its implementation.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
288 
/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	/* Without CONFIG_NET_CLS_ACT no action can ever be attached. */
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}
318 
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* Reached only without CONFIG_NET_CLS_ACT: nothing to run. */
	return TC_ACT_OK;
}
339 
/* Extension (action) parsing, lifetime and netlink dump helpers. */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
347 
/**
 * struct tcf_pkt_info - packet information
 * @ptr:     current position within the packet data
 * @nexthdr: next-header information — meaning is classifier specific;
 *           TODO confirm against users of struct tcf_pkt_info
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
355 
356 #ifdef CONFIG_NET_EMATCH
357 
struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace — NOTE(review): undocumented upstream; confirm
 *       intended use with the ematch core
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
377 
/* Predicates over an ematch's ops pointer and flag bits. */

/* A match without ops acts as a container for other matches. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True when this match terminates its chain (relation is REL_END). */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
397 
398 static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
399 {
400 	if (tcf_em_last_match(em))
401 		return 1;
402 
403 	if (result == 0 && em->flags & TCF_EM_REL_AND)
404 		return 1;
405 
406 	if (result != 0 && em->flags & TCF_EM_REL_OR)
407 		return 1;
408 
409 	return 0;
410 }
411 
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};
423 
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
448 
/* ematch module registration and tree build/destroy/dump/match API. */
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
457 
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as-one matches, no ematches are configured
 * or ematch is not enabled in the kernel, otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

/* Module alias so ematch modules can be autoloaded by kind. */
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
483 
484 #else /* CONFIG_NET_EMATCH */
485 
/* CONFIG_NET_EMATCH disabled: the tree is empty, validation always
 * succeeds, dumping emits nothing, and every packet matches.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
493 
494 #endif /* CONFIG_NET_EMATCH */
495 
496 static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
497 {
498 	switch (layer) {
499 		case TCF_LAYER_LINK:
500 			return skb->data;
501 		case TCF_LAYER_NETWORK:
502 			return skb_network_header(skb);
503 		case TCF_LAYER_TRANSPORT:
504 			return skb_transport_header(skb);
505 	}
506 
507 	return NULL;
508 }
509 
/* Check that [ptr, ptr + len) lies entirely within skb's linear data.
 * The final (ptr <= ptr + len) test guards against pointer-arithmetic
 * wraparound for large/negative @len.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
517 
518 #ifdef CONFIG_NET_CLS_IND
519 #include <net/net_namespace.h>
520 
/* Resolve an interface-name netlink attribute to an ifindex within
 * @net.  Returns the ifindex, -EINVAL if the name does not fit in
 * IFNAMSIZ, or -ENODEV if no such device exists.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
534 
535 static inline bool
536 tcf_match_indev(struct sk_buff *skb, int ifindex)
537 {
538 	if (!ifindex)
539 		return true;
540 	if  (!skb->skb_iif)
541 		return false;
542 	return ifindex == skb->skb_iif;
543 }
544 #endif /* CONFIG_NET_CLS_IND */
545 
/* Dispatch an offload setup request of @type to the block's callbacks;
 * @err_stop presumably aborts on the first error — confirm in the
 * implementation.
 */
int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);
548 
/* Command and argument describing a block bind/unbind offload event. */
enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};
559 
/* Fields common to every classifier offload downcall. */
struct tc_cls_common_offload {
	u32 chain_index;	/* chain the filter lives on */
	__be16 protocol;	/* match protocol, network byte order */
	u32 prio;		/* filter priority */
};

/* Populate @cls_common from the filter's tcf_proto. */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
}
574 
/* cls_u32 offload descriptors: key node, hash node, command, and the
 * union-carrying downcall structure.
 */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
609 
610 static inline bool tc_can_offload(const struct net_device *dev)
611 {
612 	if (!(dev->features & NETIF_F_HW_TC))
613 		return false;
614 	if (!dev->netdev_ops->ndo_setup_tc)
615 		return false;
616 	return true;
617 }
618 
619 static inline bool tc_skip_hw(u32 flags)
620 {
621 	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
622 }
623 
624 static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
625 {
626 	if (tc_skip_hw(flags))
627 		return false;
628 	return tc_can_offload(dev);
629 }
630 
631 static inline bool tc_skip_sw(u32 flags)
632 {
633 	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
634 }
635 
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	/* Reject any bit outside the two known skip flags. */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	/* After the check above, the XOR is zero only when BOTH skip
	 * bits are set, which would leave nowhere to run the filter.
	 */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
647 
648 static inline bool tc_in_hw(u32 flags)
649 {
650 	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
651 }
652 
/* cls_flower offload command and downcall descriptor. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;		/* identifies the filter instance */
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
};
668 
/* cls_matchall offload command and downcall descriptor. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;		/* identifies the filter instance */
};
680 
/* cls_bpf offload command and downcall descriptor. */
enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;	/* direct-action mode, actions inside prog */
	u32 gen_flags;
};
697 
/* mqprio offload parameters; extends the uapi tc_mqprio_qopt. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;	/* kernel-owned copy of the user-supplied bytes */
	u32 len;	/* length of @data in bytes */
};
715 #endif
716