xref: /linux/include/net/pkt_cls.h (revision 6ebe6dbd6886af07b102aca42e44edbee94a22d9)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __NET_PKT_CLS_H
3 #define __NET_PKT_CLS_H
4 
5 #include <linux/pkt_cls.h>
6 #include <linux/workqueue.h>
7 #include <net/sch_generic.h>
8 #include <net/act_api.h>
9 
10 /* Basic packet classifier frontend definitions. */
11 
/* State bundle passed to a classifier's walk callback while iterating
 * over all filters of a tcf_proto instance.
 * NOTE(review): field semantics below follow the usual tc walker
 * convention — confirm against the classifier ->walk() implementations.
 */
struct tcf_walker {
	int	stop;	/* set non-zero to abort the walk early */
	int	skip;	/* number of leading entries to skip */
	int	count;	/* entries visited so far */
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
18 
19 int register_tcf_proto_ops(struct tcf_proto_ops *ops);
20 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
21 
/* Where a filter block is bound relative to its qdisc. */
enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

/* Extended parameters for tcf_block_get_ext()/tcf_block_put_ext(). */
struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	/* Presumably invoked with chain_head_change_priv whenever the head
	 * of the filter chain changes — confirm against block core.
	 */
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
};
33 
34 struct tcf_block_cb;
35 bool tcf_queue_work(struct work_struct *work);
36 
37 #ifdef CONFIG_NET_CLS
38 struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
39 				bool create);
40 void tcf_chain_put(struct tcf_chain *chain);
41 int tcf_block_get(struct tcf_block **p_block,
42 		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
43 		  struct netlink_ext_ack *extack);
44 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
45 		      struct tcf_block_ext_info *ei,
46 		      struct netlink_ext_ack *extack);
47 void tcf_block_put(struct tcf_block *block);
48 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
49 		       struct tcf_block_ext_info *ei);
50 
/* Return the qdisc this block is attached to. May be NULL for shared
 * blocks (see the comment in tcf_bind_filter() below).
 */
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return block->q;
}

/* Return the net_device behind the block's qdisc.
 * NOTE(review): dereferences tcf_block_q() without a NULL check — unsafe
 * for shared blocks; confirm all callers guarantee a non-NULL q.
 */
static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return tcf_block_q(block)->dev_queue->dev;
}
60 
61 void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
62 struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
63 					 tc_setup_cb_t *cb, void *cb_ident);
64 void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
65 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
66 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
67 					     tc_setup_cb_t *cb, void *cb_ident,
68 					     void *cb_priv);
69 int tcf_block_cb_register(struct tcf_block *block,
70 			  tc_setup_cb_t *cb, void *cb_ident,
71 			  void *cb_priv);
72 void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
73 void tcf_block_cb_unregister(struct tcf_block *block,
74 			     tc_setup_cb_t *cb, void *cb_ident);
75 
76 int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
77 		 struct tcf_result *res, bool compat_mode);
78 
79 #else
/* !CONFIG_NET_CLS stub: report success so qdisc code builds unchanged. */
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}
87 
/* !CONFIG_NET_CLS stub: report success so qdisc code builds unchanged.
 *
 * Fix: the stub was missing the struct netlink_ext_ack *extack parameter
 * that the real declaration above takes, so any caller passing extack
 * failed to compile when CONFIG_NET_CLS is disabled. Signatures must
 * match across both branches of the #ifdef.
 */
static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}
94 
/* Remaining !CONFIG_NET_CLS stubs: no-ops / identity values so the rest
 * of the stack compiles without the classifier core.
 */
static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline struct net_device *tcf_block_dev(struct tcf_block *block)
{
	return NULL;
}

/* NOTE(review): no CONFIG_NET_CLS counterpart of the next two helpers is
 * declared above — presumably stale leftovers; confirm and drop them.
 */
static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

/* No classifiers built in: every packet stays unclassified. */
static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
184 #endif
185 
/* Atomically exchange the cached class *clp with cl; returns the
 * previous value. Lockless variant — atomicity comes from xchg().
 */
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

/* As __cls_set_class(), but serialized under the qdisc tree lock. */
static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}
202 
/* Bind filter result @r to the class identified by r->classid, releasing
 * whatever class it was bound to before.
 * @base: class hint forwarded to the qdisc's ->bind_tcf().
 */
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);	/* returns old binding */
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);	/* drop old binding */
}
219 
220 static inline void
221 tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
222 {
223 	struct Qdisc *q = tp->chain->block->q;
224 	unsigned long cl;
225 
226 	if (!q)
227 		return;
228 	if ((cl = __cls_set_class(&r->class, 0)) != 0)
229 		q->ops->cl_ops->unbind_tcf(q, cl);
230 }
231 
/* Actions and extension metadata attached to a classifier filter. */
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat(TCA_OLD_COMPAT) */
	int nr_actions;			/* used entries in actions[] */
	struct tc_action **actions;	/* kcalloc'ed in tcf_exts_init() */
	struct net *net;		/* see tcf_exts_get_net() */
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};
245 
/* Initialize @exts and allocate its fixed-size (TCA_ACT_MAX_PRIO slots)
 * action array. @action/@police record the classifier-specific TLV types.
 * Returns 0, or -ENOMEM on allocation failure — in which case action and
 * police are left unset.
 */
static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
261 
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	/* maybe_get_net() returns NULL when the netns refcount is gone. */
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

/* Release the netns reference taken by tcf_exts_get_net(), if any. */
static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
283 
/* Append every action attached to @exts to the list @actions, preserving
 * their order. No-op when actions are not compiled in.
 */
static inline void tcf_exts_to_list(const struct tcf_exts *exts,
				    struct list_head *actions)
{
#ifdef CONFIG_NET_CLS_ACT
	int idx;

	for (idx = 0; idx < exts->nr_actions; idx++)
		list_add_tail(&exts->actions[idx]->list, actions);
#endif
}
297 
/* Fold externally gathered counters (e.g. from hardware offload) into
 * every action attached to @exts.
 */
static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	/* NOTE(review): presumably pins this CPU so the per-action stats
	 * update targets a stable per-cpu slot — confirm against
	 * tcf_action_stats_update().
	 */
	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse);
	}

	preempt_enable();
#endif
}
316 
317 /**
318  * tcf_exts_has_actions - check if at least one action is present
319  * @exts: tc filter extensions handle
320  *
321  * Returns true if at least one action is present.
322  */
323 static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
324 {
325 #ifdef CONFIG_NET_CLS_ACT
326 	return exts->nr_actions;
327 #else
328 	return false;
329 #endif
330 }
331 
/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 * (Always false without CONFIG_NET_CLS_ACT.)
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}
346 
/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	/* No actions compiled in: accept unconditionally. */
	return TC_ACT_OK;
}
367 
368 int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
369 		      struct nlattr **tb, struct nlattr *rate_tlv,
370 		      struct tcf_exts *exts, bool ovr);
371 void tcf_exts_destroy(struct tcf_exts *exts);
372 void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
373 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
374 int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
375 
/**
 * struct tcf_pkt_info - packet information
 * @ptr: parse position within the packet (NOTE(review): inferred from
 *	 name — confirm against ematch users)
 * @nexthdr: next-header info (NOTE(review): exact semantics not visible
 *	 here — confirm)
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};
383 
384 #ifdef CONFIG_NET_EMATCH
385 
386 struct tcf_ematch_ops;
387 
/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: owning network namespace (NOTE(review): semantics inferred from
 *	 the field name — confirm; was undocumented)
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};
405 
/* A container ematch carries no ops of its own. */
static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

/* Test the TCF_EM_SIMPLE flag. */
static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

/* Test the TCF_EM_INVERT flag. */
static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

/* True when this ematch is the last one in its relation chain. */
static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}
425 
426 static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
427 {
428 	if (tcf_em_last_match(em))
429 		return 1;
430 
431 	if (result == 0 && em->flags & TCF_EM_REL_AND)
432 		return 1;
433 
434 	if (result != 0 && em->flags & TCF_EM_REL_OR)
435 		return 1;
436 
437 	return 0;
438 }
439 
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches; tcf_em_tree_match() treats hdr.nmatches
 *	as the configured count
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destroyage (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
476 
477 int tcf_em_register(struct tcf_ematch_ops *);
478 void tcf_em_unregister(struct tcf_ematch_ops *);
479 int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
480 			 struct tcf_ematch_tree *);
481 void tcf_em_tree_destroy(struct tcf_ematch_tree *);
482 int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
483 int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
484 			struct tcf_pkt_info *);
485 
486 /**
487  * tcf_em_tree_match - evaulate an ematch tree
488  *
489  * @skb: socket buffer of the packet in question
490  * @tree: ematch tree to be used for evaluation
491  * @info: packet information examined by classifier
492  *
493  * This function matches @skb against the ematch tree in @tree by going
494  * through all ematches respecting their logic relations returning
495  * as soon as the result is obvious.
496  *
497  * Returns 1 if the ematch tree as-one matches, no ematches are configured
498  * or ematch is not enabled in the kernel, otherwise 0 is returned.
499  */
500 static inline int tcf_em_tree_match(struct sk_buff *skb,
501 				    struct tcf_ematch_tree *tree,
502 				    struct tcf_pkt_info *info)
503 {
504 	if (tree->hdr.nmatches)
505 		return __tcf_em_tree_match(skb, tree, info);
506 	else
507 		return 1;
508 }
509 
510 #define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))
511 
512 #else /* CONFIG_NET_EMATCH */
513 
/* CONFIG_NET_EMATCH disabled: empty tree type plus no-op stand-ins.
 * tcf_em_tree_match() reports 1 ("match") so classifiers behave as if
 * no ematches were configured.
 */
struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)
521 
522 #endif /* CONFIG_NET_EMATCH */
523 
524 static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
525 {
526 	switch (layer) {
527 		case TCF_LAYER_LINK:
528 			return skb->data;
529 		case TCF_LAYER_NETWORK:
530 			return skb_network_header(skb);
531 		case TCF_LAYER_TRANSPORT:
532 			return skb_transport_header(skb);
533 	}
534 
535 	return NULL;
536 }
537 
/* Return non-zero when [ptr, ptr + len) lies entirely within the skb's
 * linear data. The final (ptr <= ptr + len) term rejects pointer
 * wraparound for huge @len values.
 */
static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
545 
546 #ifdef CONFIG_NET_CLS_IND
547 #include <net/net_namespace.h>
548 
/* Resolve an ingress-device name attribute to an ifindex in @net.
 * Returns the ifindex, -EINVAL for an over-long name, or -ENODEV when no
 * such device exists.
 * NOTE(review): __dev_get_by_name() takes no device reference — caller
 * presumably holds RTNL; confirm.
 */
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ)
		return -EINVAL;
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}
562 
563 static inline bool
564 tcf_match_indev(struct sk_buff *skb, int ifindex)
565 {
566 	if (!ifindex)
567 		return true;
568 	if  (!skb->skb_iif)
569 		return false;
570 	return ifindex == skb->skb_iif;
571 }
572 #endif /* CONFIG_NET_CLS_IND */
573 
574 int tc_setup_cb_call(struct tcf_block *block, struct tcf_exts *exts,
575 		     enum tc_setup_type type, void *type_data, bool err_stop);
576 
/* Driver commands for binding/unbinding a filter block. */
enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

/* Descriptor handed to drivers for block (un)bind events. */
struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
};

/* Fields common to all classifier offload descriptors; filled by
 * tc_cls_common_offload_init() below.
 */
struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
};
593 
/* Populate the common offload header from the classifier instance:
 * chain index, match protocol and priority.
 */
static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
}
602 
/* u32 classifier key node (knode) description for hardware offload. */
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

/* u32 classifier hash node (hnode) description for hardware offload. */
struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

/* Driver commands for u32 classifier offload. */
enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

/* Offload descriptor handed to drivers for u32 rules; the union member
 * in use is determined by @command (knode vs. hnode commands).
 */
struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};
637 
/* True when the device advertises TC hardware offload (NETIF_F_HW_TC). */
static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

/* User asked for the filter NOT to be installed in hardware. */
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

/* User asked for the filter NOT to be installed in software. */
static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	/* Reject unknown flag bits. */
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW))
		return false;

	/* The XOR is zero iff both skip bits are set, i.e. the filter
	 * would run nowhere.
	 */
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

/* True when the filter is currently resident in hardware. */
static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}
669 
/* Driver commands for flower classifier offload. */
enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
};

/* Offload descriptor handed to drivers for flower rules. */
struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;		/* opaque rule identifier */
	struct flow_dissector *dissector;
	struct fl_flow_key *mask;
	struct fl_flow_key *key;
	struct tcf_exts *exts;
	u32 classid;
};

/* Driver commands for matchall classifier offload. */
enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

/* Offload descriptor handed to drivers for matchall rules. */
struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;		/* opaque rule identifier */
};

/* Driver commands for cls_bpf offload. */
enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

/* Offload descriptor handed to drivers for cls_bpf programs. */
struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;	/* program being replaced, if any */
	const char *name;
	bool exts_integrated;
	u32 gen_flags;
};

/* mqprio qdisc offload parameters passed to drivers. */
struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};
724 
/* This structure holds cookie structure that is passed from user
 * to the kernel for actions and classifiers
 */
struct tc_cookie {
	u8  *data;	/* kernel-owned copy of the user-supplied bytes */
	u32 len;
};

/* Driver commands for RED qdisc offload. */
enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
};

/* RED parameters pushed to hardware on TC_RED_REPLACE. */
struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	bool is_ecn;	/* mark instead of drop */
};
/* Destination buffers the driver fills on TC_RED_STATS. */
struct tc_red_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

/* RED qdisc offload descriptor; the union member in use is selected by
 * @command (set for REPLACE, stats for STATS, xstats for XSTATS).
 */
struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_red_qopt_offload_stats stats;
		struct red_stats *xstats;
	};
};
761 
762 #endif
763