/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
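
/* Illustrative sketch (hypothetical qdisc "foo", not part of this API):
 * a qdisc that owns a block typically acquires it in ->init() and
 * releases it in ->destroy():
 *
 *	static int foo_init(struct Qdisc *sch, struct nlattr *opt,
 *			    struct netlink_ext_ack *extack)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		return tcf_block_get(&q->block, &q->filter_list, sch, extack);
 *	}
 *
 *	static void foo_destroy(struct Qdisc *sch)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *
 *		tcf_block_put(q->block);
 *	}
 */
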
int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
		     int police, struct tcf_proto *tp, u32 handle, bool used_action_miss);

static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
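
/* Illustrative sketch (hypothetical classifier "foo"): a ->walk()
 * implementation lets tc_cls_stats_dump() handle the skip/count/stop
 * bookkeeping for every filter it visits:
 *
 *	static void foo_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *			     bool rtnl_held)
 *	{
 *		struct foo_head *head = rtnl_dereference(tp->root);
 *		struct foo_filter *f;
 *
 *		list_for_each_entry(f, &head->flist, link)
 *			if (!tc_cls_stats_dump(tp, arg, f))
 *				break;
 *	}
 */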

#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	cl = __cls_set_class(&r->class, 0);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}
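
/* Illustrative sketch (hypothetical classifier "foo"): a ->bind_class()
 * callback forwards to tc_cls_bind_class() with the filter's tcf_result:
 *
 *	static void foo_bind_class(void *fh, u32 classid, unsigned long cl,
 *				   void *q, unsigned long base)
 *	{
 *		struct foo_filter *f = fh;
 *
 *		tc_cls_bind_class(classid, cl, q, &f->res, base);
 *	}
 */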

struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker	ns_tracker;
	struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS
	return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
#else
	return -EOPNOTSUPP;
#endif
}
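
/* Illustrative sketch (hypothetical classifier "foo" with made-up TLV
 * names): a ->change() implementation pairs tcf_exts_init() with
 * tcf_exts_validate() and tears the extensions down again on error:
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_FOO_ACT, TCA_FOO_POLICE);
 *	if (err < 0)
 *		goto errout;
 *	err = tcf_exts_validate(net, tp, tb, est, &f->exts, flags, extack);
 *	if (err < 0) {
 *		tcf_exts_destroy(&f->exts);
 *		goto errout;
 *	}
 */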

/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true in all other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}
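
/* Illustrative sketch: when deleting a filter, classifiers defer action
 * cleanup to a workqueue unless the netns is already being dismantled
 * (tcf_exts_get_net() failed), in which case cleanup must happen
 * synchronously. The foo_* names are hypothetical:
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, foo_delete_filter_work);
 *	else
 *		__foo_delete_filter(f);
 */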

#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
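
/* Illustrative sketch: counting the actions attached to a filter's
 * extensions with the iterator above ("i" and "a" are loop variables):
 *
 *	struct tc_action *a;
 *	int i, n = 0;
 *
 *	tcf_exts_for_each_action(i, a, exts)
 *		n++;
 */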

static inline bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 struct flow_stats *stats,
			 bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		if (use_act_stats || tc_act_in_hw(a)) {
			if (!tcf_action_update_hw_stats(a))
				continue;
		}

		preempt_disable();
		tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
					stats->lastused, true);
		preempt_enable();

		a->used_hw_stats = stats->used_hw_stats;
		a->used_hw_stats_valid = stats->used_hw_stats_valid;
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched, or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
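
/* Illustrative sketch (hypothetical classifier "foo"): on a match, a
 * ->classify() implementation fills in the result and runs the attached
 * actions through tcf_exts_exec():
 *
 *	list_for_each_entry_rcu(f, &head->flist, link) {
 *		if (!foo_match(skb, f))
 *			continue;
 *		*res = f->res;
 *		r = tcf_exts_exec(skb, &f->exts, res);
 *		if (r < 0)
 *			continue;
 *		return r;
 *	}
 *	return -1;
 */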

static inline int
tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
		 struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions + act_index,
			       exts->nr_actions - act_index, res);
#else
	return TC_ACT_OK;
#endif
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops	*ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}

/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch	*matches;
};

/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);

/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by going
 * through all ematches, respecting their logical relations, and returning
 * as soon as the result is certain.
 *
 * Returns 1 if the ematch tree matches as a whole, if no ematches are
 * configured, or if ematch support is not enabled in the kernel;
 * otherwise returns 0.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
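
/* Illustrative sketch (hypothetical classifier "foo" with a made-up
 * TCA_FOO_EMATCHES attribute): an ematch tree is validated once at
 * configuration time and evaluated per packet:
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_FOO_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *
 *	...
 *
 *	if (tcf_em_tree_match(skb, &f->ematches, NULL))
 *		return foo_handle_match(skb, f);
 */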

#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while (0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char *tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
	case TCF_LAYER_LINK:
		return skb_mac_header(skb);
	case TCF_LAYER_NETWORK:
		return skb_network_header(skb);
	case TCF_LAYER_TRANSPORT:
		return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
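
/* Illustrative sketch: an ematch or classifier that reads packet data at
 * a configured offset checks the access against the skb bounds first
 * ("off", "len" and "pattern" are placeholders):
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + off;
 *
 *	if (!tcf_valid_offset(skb, ptr, len))
 *		return 0;
 *	return !memcmp(ptr, pattern, len);
 */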

static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
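
/* Illustrative sketch (hypothetical TCA_FOO_INDEV attribute): a classifier
 * resolves the configured device name to an ifindex once, at change time,
 * and compares it per packet:
 *
 *	ret = tcf_change_indev(net, tb[TCA_FOO_INDEV], extack);
 *	if (ret < 0)
 *		return ret;
 *	f->ifindex = ret;
 *
 *	...
 *
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		return -1;
 */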

int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif
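
/* Illustrative sketch (mirroring how sch_red wires up its qevents): the
 * qdisc binds a qevent block at init time and passes packets through
 * tcf_qevent_handle() at the matching event point:
 *
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
 *	if (err)
 *		return err;
 *
 *	...
 *
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 */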

struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
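
/* Illustrative sketch (hypothetical driver, foo_* names are made up): a
 * driver's ndo_setup_tc() handler gates classifier offload requests on
 * the helpers above before dispatching on the command:
 *
 *	static int foo_setup_tc_cls_u32(struct net_device *dev,
 *					struct tc_cls_u32_offload *cls_u32)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(dev, &cls_u32->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (cls_u32->command) {
 *		case TC_CLSU32_NEW_KNODE:
 *		case TC_CLSU32_REPLACE_KNODE:
 *			return foo_configure_knode(dev, &cls_u32->knode);
 *		case TC_CLSU32_DELETE_KNODE:
 *			return foo_delete_knode(dev, &cls_u32->knode);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */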

static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
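
/* Illustrative sketch (hypothetical classifier "foo"; the TC_SETUP_CLSFOO
 * type and tc_cls_foo_offload struct are made up): filling in the common
 * offload fields and dispatching to the block callbacks:
 *
 *	struct tc_cls_foo_offload cls_foo = {};
 *	bool skip_sw = tc_skip_sw(f->flags);
 *	int err;
 *
 *	tc_cls_common_offload_init(&cls_foo.common, tp, f->flags, extack);
 *	cls_foo.command = TC_CLSFOO_REPLACE;
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSFOO, &cls_foo, skip_sw,
 *			      &f->flags, &f->in_hw_count, true);
 *	if (err)
 *		return err;
 */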

#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	bool use_act_stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};

/* This structure holds a cookie that is passed from user space to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u32 quantum;
	u64 rate;
	u64 ceil;
	u8 prio;
};

#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};
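
/* Illustrative sketch (hypothetical driver, foo_* names are made up): the
 * qdisc offload structs in this file follow a common pattern of a command
 * enum selecting a member of the union:
 *
 *	static int foo_setup_tc_red(struct net_device *dev,
 *				    struct tc_red_qopt_offload *opt)
 *	{
 *		switch (opt->command) {
 *		case TC_RED_REPLACE:
 *			return foo_red_replace(dev, opt->handle, &opt->set);
 *		case TC_RED_DESTROY:
 *			return foo_red_destroy(dev, opt->handle);
 *		case TC_RED_STATS:
 *			return foo_red_stats(dev, opt->handle, &opt->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */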

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif

#endif /* __NET_PKT_CLS_H */