/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/flow_offload.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
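
/*
 * Example: a minimal sketch of a classifier's ->walk() callback driven by
 * struct tcf_walker (modelled on cls_basic; example_head, example_filter
 * and the list layout are hypothetical):
 *
 *	static void example_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *				 bool rtnl_held)
 *	{
 *		struct example_head *head = rtnl_dereference(tp->root);
 *		struct example_filter *f;
 *
 *		list_for_each_entry(f, &head->flist, link) {
 *			if (arg->count < arg->skip) {
 *				arg->count++;
 *				continue;
 *			}
 *			if (arg->fn(tp, f, arg) < 0) {
 *				arg->stop = 1;
 *				break;
 *			}
 *			arg->count++;
 *		}
 *	}
 */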

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

#define tc_block_offload flow_block_offload
#define tc_block_command flow_block_command
#define tcf_block_binder_type flow_block_binder_type

struct tcf_block_ext_info {
	enum tcf_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp, bool rtnl_held);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}
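
/*
 * Example: a minimal sketch of how a qdisc obtains and releases its
 * classifier block (loosely modelled on qdiscs such as sch_atm/sch_cbq;
 * struct example_sched_data and its members are hypothetical):
 *
 *	static int example_init(struct Qdisc *sch, struct nlattr *opt,
 *				struct netlink_ext_ack *extack)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void example_destroy(struct Qdisc *sch)
 *	{
 *		struct example_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */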

void *tcf_block_cb_priv(struct tcf_block_cb *block_cb);
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident);
void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack);
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident);
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident);
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident);
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident);
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident);

int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		 struct tcf_result *res, bool compat_mode);
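
/*
 * Example: a minimal sketch of the classification step in a qdisc's
 * ->enqueue() path, assuming the standard enqueue signature (skb, sch,
 * to_free) and a hypothetical q->filter_list:
 *
 *	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 *	struct tcf_result res;
 *
 *	switch (tcf_classify(skb, fl, &res, false)) {
 *	case TC_ACT_STOLEN:
 *	case TC_ACT_QUEUED:
 *	case TC_ACT_TRAP:
 *		__qdisc_drop(skb, to_free);
 *		return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
 *	case TC_ACT_SHOT:
 *		qdisc_drop(skb, sch, to_free);
 *		return NET_XMIT_DROP;
 *	}
 *	cl = TC_H_MIN(res.classid);
 */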

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline
int tc_setup_cb_block_register(struct tcf_block *block, tc_setup_cb_t *cb,
			       void *cb_priv)
{
	return 0;
}

static inline
void tc_setup_cb_block_unregister(struct tcf_block *block, tc_setup_cb_t *cb,
				  void *cb_priv)
{
}

static inline
void *tcf_block_cb_priv(struct tcf_block_cb *block_cb)
{
	return NULL;
}

static inline
struct tcf_block_cb *tcf_block_cb_lookup(struct tcf_block *block,
					 tc_setup_cb_t *cb, void *cb_ident)
{
	return NULL;
}

static inline
void tcf_block_cb_incref(struct tcf_block_cb *block_cb)
{
}

static inline
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
{
	return 0;
}

static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
					     tc_setup_cb_t *cb, void *cb_ident,
					     void *cb_priv,
					     struct netlink_ext_ack *extack)
{
	return NULL;
}

static inline
int tcf_block_cb_register(struct tcf_block *block,
			  tc_setup_cb_t *cb, void *cb_ident,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
void __tcf_block_cb_unregister(struct tcf_block *block,
			       struct tcf_block_cb *block_cb)
{
}

static inline
void tcf_block_cb_unregister(struct tcf_block *block,
			     tc_setup_cb_t *cb, void *cb_ident)
{
}

static inline
int __tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
				tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
int tc_indr_block_cb_register(struct net_device *dev, void *cb_priv,
			      tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
	return 0;
}

static inline
void __tc_indr_block_cb_unregister(struct net_device *dev,
				   tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline
void tc_indr_block_cb_unregister(struct net_device *dev,
				 tc_indr_block_bind_cb_t *cb, void *cb_ident)
{
}

static inline int tcf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}
#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline unsigned long
cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
{
	unsigned long old_cl;

	sch_tree_lock(q);
	old_cl = __cls_set_class(clp, cl);
	sch_tree_unlock(q);
	return old_cl;
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = cls_set_class(q, &r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;
	unsigned long cl;

	if (!q)
		return;
	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net *net;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->type = 0;
	exts->nr_actions = 0;
	exts->net = net;
	exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
				GFP_KERNEL);
	if (!exts->actions)
		return -ENOMEM;
#endif
	exts->action = action;
	exts->police = police;
	return 0;
}
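
/*
 * Example: a minimal sketch of a classifier's ->change() path setting up
 * and validating extensions; TCA_EXAMPLE_ACT and TCA_EXAMPLE_POLICE stand
 * in for the classifier's own attribute IDs:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_EXAMPLE_ACT,
 *			    TCA_EXAMPLE_POLICE);
 *	if (err < 0)
 *		return err;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, ovr, true,
 *				extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		return err;
 *	}
 */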

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise may race with
 * tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net(exts->net);
#endif
}
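
/*
 * Example: a minimal sketch of the deferred-destroy pattern these two
 * helpers support (compare the cls_* modules). If the netns is already
 * being dismantled, the filter must be freed synchronously; otherwise
 * freeing is punted to a work item whose handler releases the actions
 * and then calls tcf_exts_put_net(). example_destroy_filter and
 * example_destroy_filter_work are hypothetical:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, example_destroy_filter_work);
 *	else
 *		example_destroy_filter(f);
 */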

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif
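
/*
 * Example: a minimal sketch of iterating the actions attached to a
 * filter, e.g. when translating them for hardware offload; the drop
 * counting is illustrative only:
 *
 *	struct tc_action *a;
 *	int i, drops = 0;
 *
 *	tcf_exts_for_each_action(i, a, exts) {
 *		if (is_tcf_gact_shot(a))
 *			drops++;
 *	}
 */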

static inline void
tcf_exts_stats_update(const struct tcf_exts *exts,
		      u64 bytes, u64 packets, u64 lastuse)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	preempt_disable();

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		tcf_action_stats_update(a, bytes, packets, lastuse, true);
	}

	preempt_enable();
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, bool ovr, bool rtnl_held,
		      struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);
/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace the match was created in
 */
struct tcf_ematch {
	struct tcf_ematch_ops *ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches respecting their logic relations returning
 * as soon as the result is obvious.
 *
 * Returns 1 if the ematch tree as a whole matches, no ematches are
 * configured or ematch is not enabled in the kernel, otherwise 0 is
 * returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
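
/*
 * Example: a minimal sketch of an ematch-based classify loop (compare
 * cls_basic; head, f and their layout are hypothetical):
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;
 *		return r;
 *	}
 */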

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strlcpy(indev, indev_tlv, IFNAMSIZ) >= IFNAMSIZ) {
		NL_SET_ERR_MSG(extack, "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev)
		return -ENODEV;
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
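
/*
 * Example: a minimal sketch of the usual indev pattern. At configuration
 * time the classifier converts an indev attribute (TCA_EXAMPLE_INDEV is a
 * placeholder) into an ifindex; at classification time it compares that
 * ifindex against the receiving device:
 *
 *	ret = tcf_change_indev(net, tb[TCA_EXAMPLE_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *
 * ...and later, in the classify path:
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */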

int tc_setup_flow_action(struct flow_action *flow_action,
			 const struct tcf_exts *exts);
int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

struct tc_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct tc_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct tc_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
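
/*
 * Example: a minimal sketch of a driver's block callback gating offload
 * requests with the helpers above (struct example_priv and
 * example_handle_flower are hypothetical):
 *
 *	static int example_block_cb(enum tc_setup_type type, void *type_data,
 *				    void *cb_priv)
 *	{
 *		struct example_priv *priv = cb_priv;
 *		struct tc_cls_flower_offload *f = type_data;
 *
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *		if (!tc_cls_can_offload_and_chain0(priv->dev, &f->common))
 *			return -EOPNOTSUPP;
 *		return example_handle_flower(priv, f);
 *	}
 */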

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}
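
/*
 * Example: under the rules above, either skip flag alone (or neither) is
 * accepted, but setting both is rejected because the filter would then
 * run neither in software nor in hardware:
 *
 *	tc_flags_valid(0)                             -> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW)         -> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_SW)         -> true
 *	tc_flags_valid(TCA_CLS_FLAGS_SKIP_HW |
 *		       TCA_CLS_FLAGS_SKIP_SW)         -> false
 */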

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct tc_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}

enum tc_fl_command {
	TC_CLSFLOWER_REPLACE,
	TC_CLSFLOWER_DESTROY,
	TC_CLSFLOWER_STATS,
	TC_CLSFLOWER_TMPLT_CREATE,
	TC_CLSFLOWER_TMPLT_DESTROY,
};

struct tc_cls_flower_offload {
	struct tc_cls_common_offload common;
	enum tc_fl_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
{
	return tc_flow_cmd->rule;
}
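
/*
 * Example: a minimal sketch of a driver consuming a TC_CLSFLOWER_REPLACE
 * request through the flow_rule API (the example_* side is hypothetical):
 *
 *	static int example_flower_replace(struct example_priv *priv,
 *					  struct tc_cls_flower_offload *f)
 *	{
 *		struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
 *		struct flow_match_basic match;
 *
 *		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC))
 *			flow_rule_match_basic(rule, &match);
 *		return example_install_rule(priv, f->cookie, rule);
 *	}
 */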

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct tc_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct tc_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

struct tc_mqprio_qopt_offload {
	/* struct tc_mqprio_qopt must always be the first element */
	struct tc_mqprio_qopt qopt;
	u16 mode;
	u16 shaper;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

/* This structure holds the cookie that user space passes to the kernel
 * for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_packed *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
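
/*
 * Example: a minimal sketch of the command dispatch a driver's
 * TC_SETUP_QDISC_RED handler typically performs (the example_* helpers
 * are hypothetical):
 *
 *	static int example_setup_red(struct example_port *port,
 *				     struct tc_red_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:
 *			return example_red_replace(port, opt->handle,
 *						   &opt->set);
 *		case TC_RED_DESTROY:
 *			example_red_destroy(port, opt->handle);
 *			return 0;
 *		case TC_RED_STATS:
 *			return example_red_stats(port, opt->handle,
 *						 &opt->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */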

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_packed bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* If an offloaded prio qdisc is changed to a non-offloadable
	 * config, it needs to update the backlog & qlen values to negate
	 * the HW backlog & qlen values (and only them).
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

#endif