/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)
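
/* Illustrative sketch (not a definition from this header): an action that
 * takes ownership of the skb, e.g. by queueing it internally, can return
 * TC_ACT_CONSUMED from its ->act() callback so that callers of
 * tcf_classify() neither free nor keep processing the packet.
 * foo_enqueue() is a hypothetical helper:
 *
 *	static int foo_act(struct sk_buff *skb, const struct tc_action *a,
 *			   struct tcf_result *res)
 *	{
 *		foo_enqueue(skb);
 *		return TC_ACT_CONSUMED;
 *	}
 */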

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

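/* Illustrative sketch: a classful qdisc typically acquires a block in its
 * ->init() and releases it in ->destroy(). "foo_sched_data" and its
 * block/filter_list members are hypothetical:
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
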
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
int tcf_classify_ingress(struct sk_buff *skb,
			 const struct tcf_block *ingress_block,
			 const struct tcf_proto *tp, struct tcf_result *res,
			 bool compat_mode);

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, flow_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, flow_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

static inline int tcf_classify_ingress(struct sk_buff *skb,
				       const struct tcf_block *ingress_block,
				       const struct tcf_proto *tp,
				       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}

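/* Illustrative sketch: a classifier's ->change() path usually pairs
 * tcf_exts_init() with tcf_exts_validate() (declared further below) and
 * tcf_exts_destroy() on error. TCA_FOO_ACT and TCA_FOO_POLICE are
 * hypothetical attribute IDs of the classifier:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
 *				extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */
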
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

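/* Illustrative sketch of the pattern described above: defer filter
 * destruction to a work queue only while the netns is still alive,
 * otherwise destroy synchronously. foo_destroy_filter_work() and
 * __foo_destroy_filter() are hypothetical classifier helpers:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_destroy_filter_work);
 *	else
 *		__foo_destroy_filter(f);
 *
 * The work function then calls tcf_exts_put_net() once it is done.
 */
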
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

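/* Illustrative sketch: iterating the actions attached to a filter's
 * extensions; the loop body is skipped entirely when CONFIG_NET_CLS_ACT
 * is disabled:
 *
 *	struct tc_action *act;
 *	int i;
 *
 *	tcf_exts_for_each_action(i, act, exts)
 *		pr_debug("action %d: index %u\n", i, act->tcfa_index);
 */
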
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 drops, u64 lastuse,
		      u8 used_hw_stats, bool used_hw_stats_valid)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, drops,
					lastuse, true);
		a->used_hw_stats = used_hw_stats;
		a->used_hw_stats_valid = used_hw_stats_valid;
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched, or a positive
 * action code (TC_ACT_*) which must be returned to the underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

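/* Illustrative sketch: a classifier's ->classify() callback typically ends
 * by running the attached extensions once a filter has matched; foo_filter
 * and foo_lookup() are hypothetical:
 *
 *	static int foo_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *				struct tcf_result *res)
 *	{
 *		struct foo_filter *f = foo_lookup(tp, skb);
 *
 *		if (!f)
 *			return -1;
 *		*res = f->res;
 *		return tcf_exts_exec(skb, &f->exts, res);
 *	}
 */
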
int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

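/* Illustrative sketch: an ematch module fills a struct tcf_ematch_ops and
 * registers it at module init time. TCF_EM_FOO and em_foo_match() are
 * hypothetical:
 *
 *	static struct tcf_ematch_ops em_foo_ops = {
 *		.kind	= TCF_EM_FOO,
 *		.match	= em_foo_match,
 *		.owner	= THIS_MODULE,
 *		.link	= LIST_HEAD_INIT(em_foo_ops.link)
 *	};
 *
 *	static int __init init_em_foo(void)
 *	{
 *		return tcf_em_register(&em_foo_ops);
 *	}
 *
 *	MODULE_ALIAS_TCF_EMATCH(TCF_EM_FOO);
 */
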
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch is not enabled in the kernel; otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

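/* Illustrative sketch: a classifier validates an ematch tree from a netlink
 * attribute at configuration time and evaluates it per packet.
 * TCA_FOO_EMATCHES is a hypothetical attribute of the classifier:
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *
 * and later, in the classify path:
 *
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */
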
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

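/* Illustrative sketch: an ematch ->match() implementation resolves a layer
 * base pointer and bounds-checks it before dereferencing; "off", "len" and
 * "wanted" are hypothetical values from the match's configuration:
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;
 *	return !memcmp(ptr, wanted, len);
 */
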
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}

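/* Illustrative sketch: a classifier resolves the configured input device to
 * an ifindex once, at change time, and compares it per packet.
 * TCA_FOO_INDEV is a hypothetical attribute:
 *
 *	ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *
 * and in the classify path:
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */
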
int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
void tc_cleanup_flow_action(struct flow_action *flow_action);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

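/* Illustrative sketch: when offloading a filter, a classifier prepares a
 * type-specific offload descriptor and hands it to the block callbacks;
 * err_stop is set when a hardware failure is fatal (skip_sw). The cls_foo
 * names and TC_SETUP_CLSFOO are hypothetical:
 *
 *	struct tc_cls_foo_offload cls_foo = {};
 *	bool skip_sw = tc_skip_sw(f->flags);
 *
 *	tc_cls_common_offload_init(&cls_foo.common, tp, f->flags, extack);
 *	cls_foo.command = TC_CLSFOO_REPLACE;
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls_foo,
 *			      skip_sw, &f->flags, &f->in_hw_count, true);
 *	if (err < 0)
 *		return err;
 *	if (skip_sw && !tc_in_hw(f->flags))
 *		return -EINVAL;
 */
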
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}

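/* Illustrative sketch: a driver's ndo_setup_tc() path typically gates
 * classifier offload requests with the helper above before dispatching on
 * the command; foo_flower_replace() is a hypothetical driver function:
 *
 *	static int foo_setup_tc_flower(struct net_device *dev,
 *				       struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return foo_flower_replace(dev, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */
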
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

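/* Illustrative walk-through of tc_flags_valid(): SKIP_HW alone, SKIP_SW
 * alone, or neither (optionally with VERBOSE) is accepted; setting both
 * skip flags, or any unknown bit, is rejected:
 *
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW)			-> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_SW |
 *		       TCA_CLS_FLAGS_VERBOSE)			-> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW |
 *		       TCA_CLS_FLAGS_SKIP_SW)			-> false
 */
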
static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space
 * to the kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#endif