/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>

/* TC action not accessible from user space */
#define TC_ACT_REINSERT		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
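
/* Example: a minimal sketch of how a classifier's ->walk() op typically
 * consumes a tcf_walker. Names such as my_cls_walk(), struct my_head and
 * struct my_filter are hypothetical, not part of this header:
 *
 *	static void my_cls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				bool rtnl_held)
 *	{
 *		struct my_head *head = rtnl_dereference(tp->root);
 *		struct my_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, list) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 */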

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

enum tcf_block_binder_type {
	TCF_BLOCK_BINDER_TYPE_UNSPEC,
	TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
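
/* Example: a minimal sketch of the usual tcf_block_get()/tcf_block_put()
 * pairing in a classful qdisc (hypothetical struct my_sched_data; error
 * handling trimmed):
 *
 *	static int my_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		// Ties a filter block to this qdisc; filters attached via
 *		// "tc filter add dev ..." land on q->filter_list.
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void my_qdisc_destroy(struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */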

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);
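
/* Example: a minimal sketch of how a driver handles TC_SETUP_BLOCK and
 * registers a per-block callback (hypothetical my_setup_tc_block_cb() and
 * struct my_priv; error handling trimmed):
 *
 *	static int my_setup_tc_block(struct my_priv *priv,
 *				     struct tc_block_offload *f)
 *	{
 *		if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_BLOCK_BIND:
 *			return tcf_block_cb_register(f->block,
 *						     my_setup_tc_block_cb,
 *						     priv, priv, f->extack);
 *		case TC_BLOCK_UNBIND:
 *			tcf_block_cb_unregister(f->block,
 *						my_setup_tc_block_cb, priv);
 *			return 0;
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */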

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);

#else
static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif
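
/* Example: a minimal sketch of how a classful qdisc's enqueue path typically
 * drives tcf_classify() (hypothetical my_classify() and struct my_class;
 * drop/queue policy trimmed):
 *
 *	static struct my_class *my_classify(struct sk_buff *skb,
 *					    struct Qdisc *sch)
 *	{
 *		struct my_sched_data *q = qdisc_priv(sch);
 *		struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *		struct tcf_result res;
 *
 *		if (!fl)
 *			return NULL;
 *		switch (tcf_classify(skb, fl, &res, false)) {
 *		case TC_ACT_SHOT:
 *			return NULL;	// drop the packet
 *		case TC_ACT_OK:
 *		default:
 *			return (struct my_class *)res.class;
 *		}
 *	}
 */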

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}
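
/* Example: a minimal sketch of the usual binding inside a classifier's
 * ->change() op (hypothetical TCA_MY_CLASSID attribute and my_set_parms();
 * locking and validation trimmed):
 *
 *	static int my_set_parms(struct tcf_proto *tp, struct my_filter *f,
 *				unsigned long base, struct nlattr **tb)
 *	{
 *		if (tb[TCA_MY_CLASSID]) {
 *			f->res.classid = nla_get_u32(tb[TCA_MY_CLASSID]);
 *			// Bind the new classid; a previous binding, if any,
 *			// is released inside tcf_bind_filter().
 *			tcf_bind_filter(tp, &f->res, base);
 *		}
 *		return 0;
 *	}
 */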

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier-specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = NULL;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
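
/* Example: a minimal sketch of the usual tcf_exts lifecycle in a classifier's
 * ->change() op (hypothetical TCA_MY_ACT/TCA_MY_POLICE attributes and
 * struct my_filter; error unwinding trimmed):
 *
 *	static int my_change(struct net *net, struct tcf_proto *tp,
 *			     struct nlattr **tb, struct nlattr *est, bool ovr,
 *			     struct netlink_ext_ack *extack)
 *	{
 *		struct my_filter *f = kzalloc(sizeof(*f), GFP_KERNEL);
 *		int err;
 *
 *		err = tcf_exts_init(&f->exts, TCA_MY_ACT, TCA_MY_POLICE);
 *		if (err)
 *			return err;
 *		// Parse the action/police attributes into f->exts.
 *		err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr,
 *					true, extack);
 *		if (err)
 *			tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */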

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
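
/* Example: a minimal sketch of iterating the actions attached to a filter
 * (my_exts_has_drop() is hypothetical; is_tcf_gact_shot() is shown for
 * illustration, any per-action predicate works the same way):
 *
 *	static bool my_exts_has_drop(struct tcf_exts *exts)
 *	{
 *		const struct tc_action *a;
 *		int i;
 *
 *		tcf_exts_for_each_action(i, a, exts)
 *			if (is_tcf_gact_shot(a))
 *				return true;
 *		return false;
 *	}
 */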

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_has_one_action - check if exactly one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if exactly one action is present.
 */
static inline bool tcf_exts_has_one_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions == 1;
#else
	return false;
#endif
}

static inline struct tc_action *tcf_exts_first_action(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->actions[0];
#else
	return NULL;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
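
/* Example: a minimal sketch of a classifier's ->classify() op combining a
 * match with tcf_exts_exec() (hypothetical struct my_head/my_filter and
 * my_match(); matching logic trimmed):
 *
 *	static int my_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 *			       struct tcf_result *res)
 *	{
 *		struct my_head *head = rcu_dereference_bh(tp->root);
 *		struct my_filter *f;
 *		int r;
 *
 *		list_for_each_entry_rcu(f, &head->filters, list) {
 *			if (!my_match(skb, f))
 *				continue;
 *			*res = f->res;
 *			r = tcf_exts_exec(skb, &f->exts, res);
 *			if (r < 0)
 *				continue;	// negative: treat as unmatched
 *			return r;
 *		}
 *		return -1;
 *	}
 */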

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
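
/* A short worked example of tcf_em_early_end(): in the expression
 * "A AND B OR C", A carries TCF_EM_REL_AND, B carries TCF_EM_REL_OR and C
 * carries TCF_EM_REL_END (the relation flag describes the link to the next
 * match). If A evaluates to 0, the AND relation lets the walk stop early;
 * if B evaluates to nonzero, the OR relation stops the walk with a match;
 * C, as the last match, always ends the walk.
 */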

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};
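
/* Example: a minimal sketch of an ematch module (hypothetical kind
 * TCF_EM_MYMATCH and my_em_match()/my_check(); real modules also implement
 * ->change/->dump as needed):
 *
 *	static int my_em_match(struct sk_buff *skb, struct tcf_ematch *em,
 *			       struct tcf_pkt_info *info)
 *	{
 *		// Return 1 on match, 0 otherwise; em->data holds the
 *		// module-private configuration set up in ->change().
 *		return my_check(skb, (void *)em->data);
 *	}
 *
 *	static struct tcf_ematch_ops my_em_ops = {
 *		.kind	= TCF_EM_MYMATCH,
 *		.match	= my_em_match,
 *		.owner	= THIS_MODULE,
 *	};
 *
 *	static int __init my_em_init(void)
 *	{
 *		return tcf_em_register(&my_em_ops);
 *	}
 *
 *	static void __exit my_em_exit(void)
 *	{
 *		tcf_em_unregister(&my_em_ops);
 *	}
 */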

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by walking
 * all ematches, respecting their logical relations, and returning as soon
 * as the result is determined.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
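
/* Example: a minimal sketch of bounds-checked header access using
 * tcf_get_base_ptr() and tcf_valid_offset(), as an ematch ->match()
 * implementation might do (my_match_u32_at() and the offset/value are
 * hypothetical):
 *
 *	static int my_match_u32_at(struct sk_buff *skb, int layer, int off)
 *	{
 *		unsigned char *ptr = tcf_get_base_ptr(skb, layer) + off;
 *
 *		if (!tcf_valid_offset(skb, ptr, sizeof(__be32)))
 *			return 0;	// offset outside linear skb data
 *		return *(__be32 *)ptr == htonl(0xdeadbeef);
 *	}
 */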

#ifdef CONFIG_NET_CLS_IND
#include <net/net_namespace.h>

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
#endif /* CONFIG_NET_CLS_IND */

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

enum tc_block_command {
	TC_BLOCK_BIND,
	TC_BLOCK_UNBIND,
};

struct tc_block_offload {
	enum tc_block_command command;
	enum tcf_block_binder_type binder_type;
	struct tcf_block *block;
	struct netlink_ext_ack *extack;
};

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
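
/* Example: a minimal sketch of a driver's TC_SETUP_CLSFLOWER handler guarding
 * itself with tc_cls_can_offload_and_chain0() (hypothetical my_netdev_priv()
 * and my_flower_{replace,destroy}(); stats handling trimmed):
 *
 *	static int my_setup_tc_cls_flower(struct net_device *dev,
 *					  struct tc_cls_flower_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &f->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case TC_CLSFLOWER_REPLACE:
 *			return my_flower_replace(my_netdev_priv(dev), f);
 *		case TC_CLSFLOWER_DESTROY:
 *			return my_flower_destroy(my_netdev_priv(dev), f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */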

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
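
/* Example: a minimal sketch of how a classifier fills the common part before
 * pushing an offload request to the block callbacks (hypothetical
 * my_hw_replace() and struct my_filter; the flower classifier follows this
 * pattern):
 *
 *	static int my_hw_replace(struct tcf_proto *tp, struct my_filter *f,
 *				 struct netlink_ext_ack *extack)
 *	{
 *		struct tcf_block *block = tp->chain->block;
 *		struct tc_cls_flower_offload cls_flower = {};
 *
 *		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
 *					   extack);
 *		cls_flower.command = TC_CLSFLOWER_REPLACE;
 *		cls_flower.cookie = (unsigned long)f;
 *		cls_flower.rule = f->rule;	// hypothetical flow_rule field
 *
 *		// err_stop is true when SKIP_SW demands hardware success.
 *		return tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower,
 *					tc_skip_sw(f->flags));
 *	}
 */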

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
{
	return tc_flow_cmd->rule;
}
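
/* Example: a minimal sketch of a driver pulling match keys out of the
 * flow_rule carried by a TC_CLSFLOWER_REPLACE request (hypothetical
 * my_flower_replace()/my_program_hw(); only the basic key is shown):
 *
 *	static int my_flower_replace(struct my_priv *priv,
 *				     struct tc_cls_flower_offload *f)
 *	{
 *		struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
 *		struct flow_match_basic match;
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
 *			flow_rule_match_basic(rule, &match);
 *			// match.key/match.mask now hold n_proto/ip_proto.
 *		}
 *		return my_program_hw(priv, f->cookie, rule);
 *	}
 */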

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct tcf_exts *exts;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
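
/* Example: a minimal sketch of a driver dispatching a RED qdisc offload
 * delivered through ndo_setup_tc() with type TC_SETUP_QDISC_RED
 * (hypothetical my_red_{replace,destroy,stats}()):
 *
 *	static int my_setup_tc_red(struct net_device *dev,
 *				   struct tc_red_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:
 *			return my_red_replace(dev, opt->handle, &opt->set);
 *		case TC_RED_DESTROY:
 *			return my_red_destroy(dev, opt->handle);
 *		case TC_RED_STATS:
 *			return my_red_stats(dev, opt->handle, &opt->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */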

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If a prio qdisc is offloaded and then changed to a non-offloadable
	 * configuration, it needs to update the backlog & qlen values to
	 * negate the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif