xref: /linux/include/net/pkt_cls.h (revision ef347a340b1a8507c22ee3cf981cd5cd64188431)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __NET_PKT_CLS_H
3 #define __NET_PKT_CLS_H
4 
5 #include <linux/pkt_cls.h>
6 #include <linux/workqueue.h>
7 #include <net/sch_generic.h>
8 #include <net/act_api.h>
9 
10 /* Basic packet classifier frontend definitions. */
11 
/* Cursor state for iterating over all filters of a classifier
 * (driven by the tcf_proto_ops walk callbacks; the exact counter
 * semantics are enforced by the callers, not visible in this header).
 */
struct tcf_walker {
	int	stop;	/* set to terminate the walk early */
	int	skip;	/* entries to skip before calling fn */
	int	count;	/* entries visited so far */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
18 
19 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
20 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
21 
/* Where a tcf block is bound within a qdisc: ingress or egress of clsact. */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};
27 
/* Extended parameters for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	/* Callback invoked when the head of the filter chain changes,
	 * with chain_head_change_priv as its private argument.
	 */
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;	/* non-zero selects/creates a shared block */
};
34 
35 struct tcf_block_cb;
36 bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
37 
38 #ifdef CONFIG_NET_CLS
39 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
40 				bool create);
41 void tcf_chain_put(struct tcf_chain *chain);
42 void tcf_block_netif_keep_dst(struct tcf_block *block);
43 int tcf_block_get(struct tcf_block **p_block,
44 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
45 		  struct netlink_ext_ack *extack);
46 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
47 		      struct tcf_block_ext_info *ei,
48 		      struct netlink_ext_ack *extack);
49 void tcf_block_put(struct tcf_block *block);
50 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
51 		       struct tcf_block_ext_info *ei);
52 
53 static inline bool tcf_block_shared(struct tcf_block *block)
54 {
55 	return block->index;
56 }
57 
/* Owning qdisc of a block. Must not be called for a shared block
 * (hence the WARN_ON) — a shared block is not tied to one qdisc.
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
63 
/* Net device behind the block's qdisc; inherits tcf_block_q()'s
 * restriction that the block must not be shared.
 */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}
68 
69 void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
70 struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
71 					 tc_setup_cb_t *cb, void *cb_ident);
72 void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
73 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
74 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
75 					     tc_setup_cb_t *cb, void *cb_ident,
76 					     void *cb_priv,
77 					     struct netlink_ext_ack *extack);
78 int tcf_block_cb_register(struct tcf_block *block,
79 			  tc_setup_cb_t *cb, void *cb_ident,
80 			  void *cb_priv, struct netlink_ext_ack *extack);
81 void __tcf_block_cb_unregister(struct tcf_block *block,
82 			       struct tcf_block_cb *block_cb);
83 void tcf_block_cb_unregister(struct tcf_block *block,
84 			     tc_setup_cb_t *cb, void *cb_ident);
85 
86 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
87 		 struct tcf_result *res, bool compat_mode);
88 
89 #else
/* !CONFIG_NET_CLS stub: report success without doing anything.
 * NOTE(review): *p_block is left untouched — callers must not rely on it.
 */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}
97 
/* !CONFIG_NET_CLS stub: report success, *p_block is left untouched. */
static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}
105 
/* !CONFIG_NET_CLS stub: nothing to release. */
static inline void tcf_block_put(struct tcf_block *block)
{
}
109 
/* !CONFIG_NET_CLS stub: nothing to release. */
static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}
115 
/* !CONFIG_NET_CLS stub: no qdisc association exists. */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}
120 
/* !CONFIG_NET_CLS stub: no device association exists. */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}
125 
/* !CONFIG_NET_CLS stub: registration trivially succeeds. */
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}
132 
/* !CONFIG_NET_CLS stub: nothing was registered, nothing to undo. */
static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}
138 
/* !CONFIG_NET_CLS stub: no callback objects exist. */
static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}
144 
/* !CONFIG_NET_CLS stub: lookups never find anything. */
static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}
151 
/* !CONFIG_NET_CLS stub: no refcount to take. */
static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}
156 
/* !CONFIG_NET_CLS stub: refcount is always zero. */
static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}
162 
/* !CONFIG_NET_CLS stub: no callback object is created. */
static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}
171 
/* !CONFIG_NET_CLS stub: registration trivially succeeds. */
static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}
179 
/* !CONFIG_NET_CLS stub: nothing was registered, nothing to undo. */
static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}
185 
/* !CONFIG_NET_CLS stub: nothing was registered, nothing to undo. */
static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}
191 
/* !CONFIG_NET_CLS stub: no classifiers exist, report "unclassified". */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
197 #endif
198 
/* Atomically install a new class handle and return the previous one.
 * Lock-free variant; callers needing qdisc-tree serialization use
 * cls_set_class() below.
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}
204 
/* Exchange the class handle under the qdisc tree lock and hand back
 * whatever was installed before.
 */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long prev;

	sch_tree_lock(q);
	prev = __cls_set_class(clp, cl);
	sch_tree_unlock(q);

	return prev;
}
215 
/* Bind a filter's result to its target class: take a reference on the
 * new class via bind_tcf, publish it in r->class, and drop the
 * reference on whatever class was bound before.
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
232 
233 static inline void
234 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
235 {
236 	struct Qdisc *q = tp->chain->block->q;
237 	unsigned long cl;
238 
239 	if (!q)
240 		return;
241 	if ((cl = __cls_set_class(&r->class, 0)) != 0)
242 		q->ops->cl_ops->unbind_tcf(q, cl);
243 }
244 
/* Container for the actions attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* valid entries in actions[] */
	struct tc_action **actions;	/* kcalloc'ed in tcf_exts_init() */
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
258 
/* Initialize @exts with the classifier-specific TLV ids for @action and
 * @police. Returns 0 on success or -ENOMEM if the action array cannot be
 * allocated (CONFIG_NET_CLS_ACT only).
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	/* Zeroed room for up to TCA_ACT_MAX_PRIO action pointers. */
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
274 
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* maybe_get_net() returns NULL when the netns refcount is already
	 * zero; record the (possibly NULL) result back into exts->net.
	 */
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}
288 
/* Drop the netns reference taken by a successful tcf_exts_get_net(). */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
296 
/* Append every action held in @exts to the tail of @actions,
 * preserving their order. No-op without CONFIG_NET_CLS_ACT.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int idx;

	for (idx = 0; idx < exts->nr_actions; idx++)
		list_add_tail(&exts->actions[idx]->list, actions);
#endif
}
310 
/* Push hardware-reported byte/packet/lastuse counters into every action
 * of @exts. Runs with preemption disabled; presumably required by the
 * per-cpu counters updated in tcf_action_stats_update() — confirm there.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
329 
330 /**
331  * tcf_exts_has_actions - check if at least one action is present
332  * @exts: tc filter extensions handle
333  *
334  * Returns true if at least one action is present.
335  */
336 static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
337 {
338 #ifdef CONFIG_NET_CLS_ACT
339 	return exts->nr_actions;
340 #else
341 	return false;
342 #endif
343 }
344 
/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present, false otherwise
 * (including when CONFIG_NET_CLS_ACT is disabled).
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}
359 
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* Without NET_CLS_ACT there are no actions to run. */
	return TC_ACT_OK;
}
380 
381 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
382 		      struct nlattr **tb, struct nlattr *rate_tlv,
383 		      struct tcf_exts *exts, bool ovr,
384 		      struct netlink_ext_ack *extack);
385 void tcf_exts_destroy(struct tcf_exts *exts);
386 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
387 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
388 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
389 
/**
 * struct tcf_pkt_info - packet information
 * @ptr: current position within the packet data (maintained by the
 *	ematch users; exact semantics not visible in this header)
 * @nexthdr: next-header state for the parse — TODO confirm against em_* users
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
397 
398 #ifdef CONFIG_NET_EMATCH
399 
400 struct tcf_ematch_ops;
401 
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: network namespace carried for the ematch module callbacks —
 *	TODO confirm exact ownership against the em_* users
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
419 
420 static inline int tcf_em_is_container(struct tcf_ematch *em)
421 {
422 	return !em->ops;
423 }
424 
/* True if the match payload is stored inline (TCF_EM_SIMPLE). */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}
429 
/* True if the match result must be logically inverted (TCF_EM_INVERT). */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}
434 
/* True if this ematch is the last one in its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
439 
440 static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
441 {
442 	if (tcf_em_last_match(em))
443 		return 1;
444 
445 	if (result == 0 && em->flags & TCF_EM_REL_AND)
446 		return 1;
447 
448 	if (result != 0 && em->flags & TCF_EM_REL_OR)
449 		return 1;
450 
451 	return 0;
452 }
453 
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches (hdr.nmatches entries)
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};
465 
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
490 
491 int tcf_em_register(struct tcf_ematch_ops *);
492 void tcf_em_unregister(struct tcf_ematch_ops *);
493 int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
494 			 struct tcf_ematch_tree *);
495 void tcf_em_tree_destroy(struct tcf_ematch_tree *);
496 int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
497 int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
498 			struct tcf_pkt_info *);
499 
500 /**
501  * tcf_em_tree_match - evaulate an ematch tree
502  *
503  * @skb: socket buffer of the packet in question
504  * @tree: ematch tree to be used for evaluation
505  * @info: packet information examined by classifier
506  *
507  * This function matches @skb against the ematch tree in @tree by going
508  * through all ematches respecting their logic relations returning
509  * as soon as the result is obvious.
510  *
511  * Returns 1 if the ematch tree as-one matches, no ematches are configured
512  * or ematch is not enabled in the kernel, otherwise 0 is returned.
513  */
514 static inline int tcf_em_tree_match(struct sk_buff *skb,
515 				    struct tcf_ematch_tree *tree,
516 				    struct tcf_pkt_info *info)
517 {
518 	if (tree->hdr.nmatches)
519 		return __tcf_em_tree_match(skb, tree, info);
520 	else
521 		return 1;
522 }
523 
/* Module alias so ematch modules can be autoloaded by their kind id. */
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
525 
526 #else /* CONFIG_NET_EMATCH */
527 
/* !CONFIG_NET_EMATCH stubs: empty tree type, and macro versions of the
 * API that succeed trivially (match always returns 1, i.e. "matches").
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
535 
536 #endif /* CONFIG_NET_EMATCH */
537 
538 static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
539 {
540 	switch (layer) {
541 		case TCF_LAYER_LINK:
542 			return skb_mac_header(skb);
543 		case TCF_LAYER_NETWORK:
544 			return skb_network_header(skb);
545 		case TCF_LAYER_TRANSPORT:
546 			return skb_transport_header(skb);
547 	}
548 
549 	return NULL;
550 }
551 
/* Check that [ptr, ptr + len) lies entirely inside the skb's linear
 * data area. NOTE(review): the final (ptr <= ptr + len) term guards
 * against a huge @len wrapping the end pointer; it relies on pointer
 * wrap-around, which is technically undefined behaviour in C.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
559 
560 #ifdef CONFIG_NET_CLS_IND
561 #include <net/net_namespace.h>
562 
/* Resolve the TCA_*_INDEV attribute @indev_tlv to an interface index in
 * @net. Returns the ifindex on success, -EINVAL if the name does not fit
 * in IFNAMSIZ, or -ENODEV if no such device exists. Caller must hold RTNL
 * (presumably — __dev_get_by_name is the unlocked lookup; confirm at callers).
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
579 
580 static inline bool
581 tcf_match_indev(struct sk_buff *skb, int ifindex)
582 {
583 	if (!ifindex)
584 		return true;
585 	if  (!skb->skb_iif)
586 		return false;
587 	return ifindex == skb->skb_iif;
588 }
589 #endif /* CONFIG_NET_CLS_IND */
590 
591 int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
592 		     enum tc_setup_type type, void *type_data, bool err_stop);
593 
/* Driver-facing commands for binding/unbinding a tcf block. */
enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};
598 
/* Argument for the TC block (un)bind driver offload call —
 * presumably delivered via ndo_setup_tc; confirm at callers.
 */
struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};
605 
/* Fields shared by all classifier offload requests; populated by
 * tc_cls_common_offload_init().
 */
struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;	/* may be NULL, see init helper */
};
612 
/* u32 classifier key node description handed to hardware. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};
622 
/* u32 classifier hash node description handed to hardware. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};
628 
/* Offload commands for the u32 classifier (key nodes and hash nodes). */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};
637 
/* u32 classifier offload request; the union member in use is selected
 * by @command (KNODE commands use knode, HNODE commands use hnode).
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
647 
648 static inline bool tc_can_offload(const struct net_device *dev)
649 {
650 	return dev->features & NETIF_F_HW_TC;
651 }
652 
653 static inline bool tc_can_offload_extack(const struct net_device *dev,
654 					 struct netlink_ext_ack *extack)
655 {
656 	bool can = tc_can_offload(dev);
657 
658 	if (!can)
659 		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");
660 
661 	return can;
662 }
663 
/* Helper for drivers that only support offloading filters on chain 0:
 * checks general offload capability and rejects any non-zero chain,
 * reporting the reason via extack in both failure cases.
 */
static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
677 
678 static inline bool tc_skip_hw(u32 flags)
679 {
680 	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
681 }
682 
683 static inline bool tc_skip_sw(u32 flags)
684 {
685 	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
686 }
687 
/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	/* Reject any bit outside the known flag set. */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	/* Reject the case where both skip flags are set: the XOR is zero
	 * only when SKIP_HW and SKIP_SW are both present, which would
	 * leave the filter with nowhere to run.
	 */
	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
701 
702 static inline bool tc_in_hw(u32 flags)
703 {
704 	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
705 }
706 
/* Fill the common part of a classifier offload request from @tp.
 * extack is only attached when failures matter to the caller: either
 * skip_sw is set (hardware is mandatory) or verbose reporting was
 * requested. NOTE(review): cls_common->extack is left uninitialized
 * otherwise — callers must zero the struct first.
 */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
718 
/* Offload commands for the flower classifier. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};
724 
/* Flower classifier offload request: flow key/mask pair described via
 * the flow dissector, plus the attached actions.
 */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;	/* identifies the filter across calls */
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};
735 
/* Offload commands for the matchall classifier. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};
740 
/* Matchall classifier offload request. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;	/* identifies the filter across calls */
};
747 
/* Offload commands for the cls_bpf classifier. */
enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};
752 
/* cls_bpf offload request; carries both the new and previous program so
 * the driver can swap them (semantics of prog/oldprog combinations are
 * defined by the cls_bpf callers — not visible here).
 */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};
762 
/* mqprio qdisc offload parameters. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
772 
/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;	/* opaque bytes, owned by this cookie */
	u32 len;	/* length of data in bytes */
};
780 
/* Destination pointers a driver fills when reporting qdisc stats. */
struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};
785 
/* Offload commands for the mq qdisc. */
enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
};
791 
/* mq qdisc offload request. */
struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	struct tc_qopt_offload_stats stats;	/* used for TC_MQ_STATS */
};
797 
/* Offload commands for the RED qdisc. */
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};
804 
/* RED parameters for TC_RED_REPLACE. */
struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;	/* mark with ECN instead of dropping */
	struct gnet_stats_queue *qstats;
};
812 
/* RED qdisc offload request; the union member in use is selected by
 * @command (set for REPLACE, stats for STATS, xstats for XSTATS).
 */
struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};
823 
/* Offload commands for the prio qdisc. */
enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};
830 
/* prio parameters for TC_PRIO_REPLACE. */
struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* In case that a prio qdisc is offloaded and now is changed to a
	 * non-offloadable config, it needs to update the backlog & qlen
	 * values to negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};
840 
/* prio parameters for TC_PRIO_GRAFT: child qdisc grafted onto a band. */
struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};
845 
/* prio qdisc offload request; the union member in use is selected by
 * @command (replace_params for REPLACE, stats for STATS, graft_params
 * for GRAFT).
 */
struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};
856 
857 #endif
858