xref: /linux/include/net/pkt_cls.h (revision 22ac5ad4a7d4e201d19b7f04ce8d79346c80a34b)
1 #ifndef __NET_PKT_CLS_H
2 #define __NET_PKT_CLS_H
3 
4 #include <linux/pkt_cls.h>
5 #include <linux/workqueue.h>
6 #include <net/sch_generic.h>
7 #include <net/act_api.h>
8 
9 /* Basic packet classifier frontend definitions. */
10 
/* Basic packet classifier frontend definitions. */

/* State passed to classifier ->walk() callbacks while iterating over
 * all filters of a tcf_proto instance.
 * NOTE(review): stop/skip/count appear to control walk abort/offset/
 * progress -- confirm against the walkers in net/sched/cls_*.c.
 */
struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	/* Invoked once per filter node; @node is classifier-private. */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

/* Register/unregister a classifier kind ("u32", "flower", ...). */
int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

/* Where a tcf block is bound on its qdisc. */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

/* Extended arguments for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
};

struct tcf_block_cb;
/* Queue @work for deferred execution; presumably on the tc filter
 * workqueue -- see implementation in net/sched/cls_api.c.
 */
bool tcf_queue_work(struct work_struct *work);
33 
34 #ifdef CONFIG_NET_CLS
/* Look up chain @chain_index in @block, optionally creating it. */
struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
				bool create);
void tcf_chain_put(struct tcf_chain *chain);
/* Acquire/release a filter block for qdisc @q. The _ext variants take
 * additional binding information in @ei.
 */
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q);
int tcf_block_get_ext(struct tcf_block **p_block,
		      struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		      struct tcf_block_ext_info *ei);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

/* Qdisc the block is attached to.
 * NOTE(review): block->q is not set for shared blocks (see the comment
 * in tcf_bind_filter() below), so callers of tcf_block_q()/
 * tcf_block_dev() must be sure the block is qdisc-bound.
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}

/* Net device backing the qdisc the block is attached to. */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}

/* Driver offload callback registry for a block: each tcf_block_cb
 * pairs a tc_setup_cb_t with a caller-chosen identity (@cb_ident) and
 * private data (@cb_priv), with manual reference counting.
 */
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv);
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);

/* Run the classifier chain starting at @tp on @skb, storing the match
 * in @res; @compat_mode selects legacy behavior.
 */
int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
75 
76 #else
/* Stubs used when CONFIG_NET_CLS is disabled: block setup/teardown is
 * a no-op and classification always returns TC_ACT_UNSPEC.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block,
		      struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		      struct tcf_block_ext_info *ei)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block,
		       struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

/* NOTE(review): the two tc_setup_cb_block_* stubs below have no
 * counterpart in the CONFIG_NET_CLS branch above -- confirm whether
 * they are still referenced anywhere.
 */
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

/* No classifiers built in: let the packet continue unclassified. */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
182 #endif
183 
/* Atomically exchange the class reference at @clp with @cl and return
 * the previous value.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

/* Same exchange, but serialized under the qdisc tree lock. */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

/* Bind filter result @r to the class identified by @r->classid on the
 * qdisc owning @tp, unbinding any class it previously pointed at.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
217 
218 static inline void
219 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
220 {
221 	struct Qdisc *q = tp->chain->block->q;
222 	unsigned long cl;
223 
224 	if (!q)
225 		return;
226 	if ((cl = __cls_set_class(&r->class, 0)) != 0)
227 		q->ops->cl_ops->unbind_tcf(q, cl);
228 }
229 
/* Extensions attached to a classifier filter: the configured actions
 * (when CONFIG_NET_CLS_ACT) plus the classifier-specific TLV mapping.
 */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

/* Initialize @exts with the classifier's TLV ids @action/@police.
 * With CONFIG_NET_CLS_ACT this allocates an empty actions array of
 * TCA_ACT_MAX_PRIO slots (released via tcf_exts_destroy(), declared
 * below). Returns 0 on success or -ENOMEM.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

/* Append every action in @exts, in order, to the tail of @actions. */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		list_add_tail(&a->list, actions);
	}
#endif
}

/* Feed hardware-reported counters into every action of @exts.
 * NOTE(review): preemption is disabled around the loop, presumably
 * because tcf_action_stats_update() touches per-cpu state -- confirm.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
290 
291 /**
292  * tcf_exts_has_actions - check if at least one action is present
293  * @exts: tc filter extensions handle
294  *
295  * Returns true if at least one action is present.
296  */
297 static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
298 {
299 #ifdef CONFIG_NET_CLS_ACT
300 	return exts->nr_actions;
301 #else
302 	return false;
303 #endif
304 }
305 
306 /**
307  * tcf_exts_has_one_action - check if exactly one action is present
308  * @exts: tc filter extensions handle
309  *
310  * Returns true if exactly one action is present.
311  */
312 static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
313 {
314 #ifdef CONFIG_NET_CLS_ACT
315 	return exts->nr_actions == 1;
316 #else
317 	return false;
318 #endif
319 }
320 
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* Without CONFIG_NET_CLS_ACT there is nothing to run. */
	return TC_ACT_OK;
}

/* Parse and apply action/police TLVs onto @exts (@ovr = overwrite). */
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr);
void tcf_exts_destroy(struct tcf_exts *exts);
/* Move the extensions of @src into @dst. */
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 * @ptr: current parse position inside the packet data -- NOTE(review):
 *	confirm exact semantics against the ematch users
 * @nexthdr: next-header state -- NOTE(review): confirm semantics
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
357 
358 #ifdef CONFIG_NET_EMATCH
359 
struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace the ematch belongs to
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

/* A container ematch has no ops of its own. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* TCF_EM_SIMPLE set: config data is stored inline in @data. */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* TCF_EM_INVERT set: the match result is negated. */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True when @em carries no relation to a following ematch. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
399 
400 static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
401 {
402 	if (tcf_em_last_match(em))
403 		return 1;
404 
405 	if (result == 0 && em->flags & TCF_EM_REL_AND)
406 		return 1;
407 
408 	if (result != 0 && em->flags & TCF_EM_REL_OR)
409 		return 1;
410 
411 	return 0;
412 }
413 
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destroyage (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

/* Register/unregister an ematch module kind. */
int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
/* Build/destroy/dump an ematch tree from/to netlink attributes. */
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
/* Slow path of tcf_em_tree_match() for non-empty trees. */
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
459 
460 /**
461  * tcf_em_tree_match - evaulate an ematch tree
462  *
463  * @skb: socket buffer of the packet in question
464  * @tree: ematch tree to be used for evaluation
465  * @info: packet information examined by classifier
466  *
467  * This function matches @skb against the ematch tree in @tree by going
468  * through all ematches respecting their logic relations returning
469  * as soon as the result is obvious.
470  *
471  * Returns 1 if the ematch tree as-one matches, no ematches are configured
472  * or ematch is not enabled in the kernel, otherwise 0 is returned.
473  */
474 static inline int tcf_em_tree_match(struct sk_buff *skb,
475 				    struct tcf_ematch_tree *tree,
476 				    struct tcf_pkt_info *info)
477 {
478 	if (tree->hdr.nmatches)
479 		return __tcf_em_tree_match(skb, tree, info);
480 	else
481 		return 1;
482 }
483 
484 #define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
485 
486 #else /* CONFIG_NET_EMATCH */
487 
/* Stubs used when CONFIG_NET_EMATCH is disabled: trees are empty,
 * validation/dump succeed trivially and every match succeeds.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
495 
496 #endif /* CONFIG_NET_EMATCH */
497 
498 static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
499 {
500 	switch (layer) {
501 		case TCF_LAYER_LINK:
502 			return skb->data;
503 		case TCF_LAYER_NETWORK:
504 			return skb_network_header(skb);
505 		case TCF_LAYER_TRANSPORT:
506 			return skb_transport_header(skb);
507 	}
508 
509 	return NULL;
510 }
511 
/* Check that the @len bytes starting at @ptr lie entirely within the
 * linear data of @skb. The final comparison rejects pointer
 * wrap-around of @ptr + @len.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

/* Resolve the interface name carried in @indev_tlv within @net.
 * Returns the device's ifindex, -EINVAL if the name does not fit in
 * IFNAMSIZ, or -ENODEV if no such device exists.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
536 
537 static inline bool
538 tcf_match_indev(struct sk_buff *skb, int ifindex)
539 {
540 	if (!ifindex)
541 		return true;
542 	if  (!skb->skb_iif)
543 		return false;
544 	return ifindex == skb->skb_iif;
545 }
546 #endif /* CONFIG_NET_CLS_IND */
547 
/* Dispatch an offload command of @type with payload @type_data to the
 * callbacks registered on @block; @err_stop aborts on the first error.
 * Implemented in net/sched/cls_api.c.
 */
int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
		     enum tc_setup_type type, void *type_data, bool err_stop);

/* Block bind/unbind commands delivered to drivers. */
enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};

/* Fields common to all classifier offload requests. */
struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
};

/* Fill @cls_common from the classifier instance @tp. */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
}

/* u32 classifier offload descriptors: key node ... */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

/* ... and hash node. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* Offload request for the u32 classifier; @command selects which union
 * member is valid.
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
611 
612 static inline bool tc_can_offload(const struct net_device *dev)
613 {
614 	if (!(dev->features & NETIF_F_HW_TC))
615 		return false;
616 	if (!dev->netdev_ops->ndo_setup_tc)
617 		return false;
618 	return true;
619 }
620 
621 static inline bool tc_skip_hw(u32 flags)
622 {
623 	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
624 }
625 
626 static inline bool tc_should_offload(const struct net_device *dev, u32 flags)
627 {
628 	if (tc_skip_hw(flags))
629 		return false;
630 	return tc_can_offload(dev);
631 }
632 
633 static inline bool tc_skip_sw(u32 flags)
634 {
635 	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
636 }
637 
638 /* SKIP_HW and SKIP_SW are mutually exclusive flags. */
639 static inline bool tc_flags_valid(u32 flags)
640 {
641 	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
642 		return false;
643 
644 	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
645 		return false;
646 
647 	return true;
648 }
649 
650 static inline bool tc_in_hw(u32 flags)
651 {
652 	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
653 }
654 
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* Offload descriptor for the flower classifier; @cookie identifies the
 * filter instance to the driver.
 */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* Offload descriptor for the matchall classifier. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_ADD,
	TC_CLSBPF_REPLACE,
	TC_CLSBPF_DESTROY,
	TC_CLSBPF_STATS,
};

/* Offload descriptor for the BPF classifier. */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* mqprio qdisc offload parameters. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;
	u32 len;
};
718 #endif
719