/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_PKT_CLS_H
#define __NET_PKT_CLS_H

#include <linux/pkt_cls.h>
#include <linux/workqueue.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/net_namespace.h>

/* TC action not accessible from user space */
#define TC_ACT_CONSUMED		(TC_ACT_VALUE_MAX + 1)

/* Basic packet classifier frontend definitions. */

struct tcf_walker {
	int	stop;
	int	skip;
	int	count;
	bool	nonempty;
	unsigned long cookie;
	int	(*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};

int register_tcf_proto_ops(struct tcf_proto_ops *ops);
void unregister_tcf_proto_ops(struct tcf_proto_ops *ops);

struct tcf_block_ext_info {
	enum flow_block_binder_type binder_type;
	tcf_chain_head_change_t *chain_head_change;
	void *chain_head_change_priv;
	u32 block_index;
};

struct tcf_qevent {
	struct tcf_block	*block;
	struct tcf_block_ext_info info;
	struct tcf_proto __rcu *filter_chain;
};

struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);

#ifdef CONFIG_NET_CLS
struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
				       u32 chain_index);
void tcf_chain_put_by_act(struct tcf_chain *chain);
struct tcf_chain *tcf_get_next_chain(struct tcf_block *block,
				     struct tcf_chain *chain);
struct tcf_proto *tcf_get_next_proto(struct tcf_chain *chain,
				     struct tcf_proto *tp);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack);
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack);
void tcf_block_put(struct tcf_block *block);
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei);
int tcf_exts_init_ex(struct tcf_exts *exts, struct net *net, int action,
		     int police, struct tcf_proto *tp, u32 handle, bool use_action_miss);
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return block->index;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return block && block->index;
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	WARN_ON(tcf_block_shared(block));
	return block->q;
}

int tcf_classify(struct sk_buff *skb,
		 const struct tcf_block *block,
		 const struct tcf_proto *tp, struct tcf_result *res,
		 bool compat_mode);

static inline bool tc_cls_stats_dump(struct tcf_proto *tp,
				     struct tcf_walker *arg,
				     void *filter)
{
	if (arg->count >= arg->skip && arg->fn(tp, filter, arg) < 0) {
		arg->stop = 1;
		return false;
	}

	arg->count++;
	return true;
}
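/* Illustrative sketch (not part of this API): a classifier's ->walk()
 * callback would typically rely on tc_cls_stats_dump() to honour the
 * skip/count/stop protocol of struct tcf_walker. The mycls_* names and
 * the filter list below are assumptions made up for this example.
 *
 *	static void mycls_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 *			       bool rtnl_held)
 *	{
 *		struct mycls_head *head = rtnl_dereference(tp->root);
 *		struct mycls_filter *f;
 *
 *		list_for_each_entry(f, &head->filters, link) {
 *			if (!tc_cls_stats_dump(tp, arg, f))
 *				break;
 *		}
 *	}
 */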
#else
static inline bool tcf_block_shared(struct tcf_block *block)
{
	return false;
}

static inline bool tcf_block_non_null_shared(struct tcf_block *block)
{
	return false;
}

static inline
int tcf_block_get(struct tcf_block **p_block,
		  struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
		  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline
int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
		      struct tcf_block_ext_info *ei,
		      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_block_put(struct tcf_block *block)
{
}

static inline
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
}

static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
	return NULL;
}

static inline int tcf_classify(struct sk_buff *skb,
			       const struct tcf_block *block,
			       const struct tcf_proto *tp,
			       struct tcf_result *res, bool compat_mode)
{
	return TC_ACT_UNSPEC;
}

#endif

static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
{
	return xchg(clp, cl);
}

static inline void tcf_set_drop_reason(struct tcf_result *res,
				       enum skb_drop_reason reason)
{
	res->drop_reason = reason;
}

static inline void
__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
	unsigned long cl;

	cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
	cl = __cls_set_class(&r->class, cl);
	if (cl)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
	struct Qdisc *q = tp->chain->block->q;

	/* Check q as it is not set for shared blocks. In that case,
	 * setting class is not supported.
	 */
	if (!q)
		return;
	sch_tree_lock(q);
	__tcf_bind_filter(q, r, base);
	sch_tree_unlock(q);
}

static inline void
__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&r->class, 0)) != 0)
		q->ops->cl_ops->unbind_tcf(q, cl);
}

static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
	struct Qdisc *q = tp->chain->block->q;

	if (!q)
		return;
	__tcf_unbind_filter(q, r);
}

static inline void tc_cls_bind_class(u32 classid, unsigned long cl,
				     void *q, struct tcf_result *res,
				     unsigned long base)
{
	if (res->classid == classid) {
		if (cl)
			__tcf_bind_filter(q, res, base);
		else
			__tcf_unbind_filter(q, res);
	}
}
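/* Illustrative sketch (assumed mycls_* names and TCA_MYCLS_CLASSID
 * attribute, not part of this API): a classifier binds its result to a
 * class when a classid is configured, and forwards ->bind_class() requests
 * via tc_cls_bind_class():
 *
 *	if (tb[TCA_MYCLS_CLASSID]) {
 *		f->res.classid = nla_get_u32(tb[TCA_MYCLS_CLASSID]);
 *		tcf_bind_filter(tp, &f->res, base);
 *	}
 *
 *	static void mycls_bind_class(void *fh, u32 classid, unsigned long cl,
 *				     void *q, unsigned long base)
 *	{
 *		struct mycls_filter *f = fh;
 *
 *		tc_cls_bind_class(classid, cl, q, &f->res, base);
 *	}
 *
 * The matching tcf_unbind_filter() call is made when the filter is deleted
 * or replaced.
 */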
struct tcf_exts {
#ifdef CONFIG_NET_CLS_ACT
	__u32	type; /* for backward compat (TCA_OLD_COMPAT) */
	int nr_actions;
	struct tc_action **actions;
	struct net	*net;
	netns_tracker	ns_tracker;
	struct tcf_exts_miss_cookie_node *miss_cookie_node;
#endif
	/* Map to export classifier specific extension TLV types to the
	 * generic extensions API. Unsupported extensions must be set to 0.
	 */
	int action;
	int police;
};

static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
				int action, int police)
{
#ifdef CONFIG_NET_CLS
	return tcf_exts_init_ex(exts, net, action, police, NULL, 0, false);
#else
	return -EOPNOTSUPP;
#endif
}
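/* Illustrative sketch: a classifier initializes its tcf_exts with its own
 * action/police attribute types before parsing actions. TCA_MYCLS_ACT and
 * TCA_MYCLS_POLICE are hypothetical attribute IDs used only for this
 * example; real classifiers pass their own TCA_*_ACT/TCA_*_POLICE values.
 *
 *	err = tcf_exts_init(&f->exts, net, TCA_MYCLS_ACT, TCA_MYCLS_POLICE);
 *	if (err < 0)
 *		goto errout;
 */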
/* Return false if the netns is being destroyed in cleanup_net(). Callers
 * need to do cleanup synchronously in this case, otherwise they may race
 * with tc_action_net_exit(). Return true for other cases.
 */
static inline bool tcf_exts_get_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	exts->net = maybe_get_net(exts->net);
	if (exts->net)
		netns_tracker_alloc(exts->net, &exts->ns_tracker, GFP_KERNEL);
	return exts->net != NULL;
#else
	return true;
#endif
}

static inline void tcf_exts_put_net(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	if (exts->net)
		put_net_track(exts->net, &exts->ns_tracker);
#endif
}
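/* Illustrative sketch of the usual teardown pattern built on these two
 * helpers together with tcf_queue_work(): while the netns is still alive,
 * filter destruction is deferred to a work item (which may sleep and take
 * the RTNL lock); when the netns is already being torn down, it must happen
 * synchronously. The mycls_* names and the rwork field are assumptions.
 *
 *	static void mycls_delete_filter_work(struct work_struct *work)
 *	{
 *		struct mycls_filter *f = container_of(to_rcu_work(work),
 *						      struct mycls_filter,
 *						      rwork);
 *		rtnl_lock();
 *		__mycls_destroy_filter(f);
 *		rtnl_unlock();
 *	}
 *
 *	if (tcf_exts_get_net(&f->exts))
 *		tcf_queue_work(&f->rwork, mycls_delete_filter_work);
 *	else
 *		__mycls_destroy_filter(f);
 *
 * __mycls_destroy_filter() is expected to call tcf_exts_destroy() and
 * tcf_exts_put_net() before freeing the filter.
 */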
#ifdef CONFIG_NET_CLS_ACT
#define tcf_exts_for_each_action(i, a, exts) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = (exts)->actions[i]); i++)
#else
#define tcf_exts_for_each_action(i, a, exts) \
	for (; 0; (void)(i), (void)(a), (void)(exts))
#endif

#define tcf_act_for_each_action(i, a, actions) \
	for (i = 0; i < TCA_ACT_MAX_PRIO && ((a) = actions[i]); i++)
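/* Illustrative sketch: iterating the actions attached to a filter. The
 * loop below simply counts how many of them are already present in
 * hardware (see tc_act_in_hw() right below); "exts" is assumed to point
 * at a populated struct tcf_exts.
 *
 *	struct tc_action *act;
 *	int i, in_hw = 0;
 *
 *	tcf_exts_for_each_action(i, act, exts) {
 *		if (tc_act_in_hw(act))
 *			in_hw++;
 *	}
 */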
static inline bool tc_act_in_hw(struct tc_action *act)
{
	return !!act->in_hw_count;
}

static inline void
tcf_exts_hw_stats_update(const struct tcf_exts *exts,
			 struct flow_stats *stats,
			 bool use_act_stats)
{
#ifdef CONFIG_NET_CLS_ACT
	int i;

	for (i = 0; i < exts->nr_actions; i++) {
		struct tc_action *a = exts->actions[i];

		if (use_act_stats || tc_act_in_hw(a)) {
			if (!tcf_action_update_hw_stats(a))
				continue;
		}

		preempt_disable();
		tcf_action_stats_update(a, stats->bytes, stats->pkts, stats->drops,
					stats->lastused, true);
		preempt_enable();

		a->used_hw_stats = stats->used_hw_stats;
		a->used_hw_stats_valid = stats->used_hw_stats_valid;
	}
#endif
}

/**
 * tcf_exts_has_actions - check if at least one action is present
 * @exts: tc filter extensions handle
 *
 * Returns true if at least one action is present.
 */
static inline bool tcf_exts_has_actions(struct tcf_exts *exts)
{
#ifdef CONFIG_NET_CLS_ACT
	return exts->nr_actions;
#else
	return false;
#endif
}

/**
 * tcf_exts_exec - execute tc filter extensions
 * @skb: socket buffer
 * @exts: tc filter extensions handle
 * @res: desired result
 *
 * Executes all configured extensions. Returns TC_ACT_OK on a normal execution,
 * a negative number if the filter must be considered unmatched or
 * a positive action code (TC_ACT_*) which must be returned to the
 * underlying layer.
 */
static inline int
tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
	      struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions, exts->nr_actions, res);
#endif
	return TC_ACT_OK;
}
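/* Illustrative sketch of the classify fast path described above: once a
 * filter matches, its result is copied out and its extensions are executed;
 * a negative return value lets the caller fall through to the next filter
 * or chain. The mycls_* names are assumptions for this example only.
 *
 *	static int mycls_classify(struct sk_buff *skb,
 *				  const struct tcf_proto *tp,
 *				  struct tcf_result *res)
 *	{
 *		struct mycls_head *head = rcu_dereference_bh(tp->root);
 *		struct mycls_filter *f;
 *
 *		list_for_each_entry_rcu(f, &head->filters, link) {
 *			if (!mycls_match(skb, f))
 *				continue;
 *			*res = f->res;
 *			return tcf_exts_exec(skb, &f->exts, res);
 *		}
 *		return -1;
 *	}
 */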
static inline int
tcf_exts_exec_ex(struct sk_buff *skb, struct tcf_exts *exts, int act_index,
		 struct tcf_result *res)
{
#ifdef CONFIG_NET_CLS_ACT
	return tcf_action_exec(skb, exts->actions + act_index,
			       exts->nr_actions - act_index, res);
#else
	return TC_ACT_OK;
#endif
}

int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
		      struct nlattr **tb, struct nlattr *rate_tlv,
		      struct tcf_exts *exts, u32 flags,
		      struct netlink_ext_ack *extack);
int tcf_exts_validate_ex(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
			 struct nlattr *rate_tlv, struct tcf_exts *exts,
			 u32 flags, u32 fl_flags, struct netlink_ext_ack *extack);
void tcf_exts_destroy(struct tcf_exts *exts);
void tcf_exts_change(struct tcf_exts *dst, struct tcf_exts *src);
int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_terse_dump(struct sk_buff *skb, struct tcf_exts *exts);
int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts);

/**
 * struct tcf_pkt_info - packet information
 *
 * @ptr: start of the pkt data
 * @nexthdr: offset of the next header
 */
struct tcf_pkt_info {
	unsigned char *		ptr;
	int			nexthdr;
};

#ifdef CONFIG_NET_EMATCH

struct tcf_ematch_ops;

/**
 * struct tcf_ematch - extended match (ematch)
 *
 * @matchid: identifier to allow userspace to reidentify a match
 * @flags: flags specifying attributes and the relation to other matches
 * @ops: the operations lookup table of the corresponding ematch module
 * @datalen: length of the ematch specific configuration data
 * @data: ematch specific data
 * @net: the network namespace
 */
struct tcf_ematch {
	struct tcf_ematch_ops * ops;
	unsigned long		data;
	unsigned int		datalen;
	u16			matchid;
	u16			flags;
	struct net		*net;
};

static inline int tcf_em_is_container(struct tcf_ematch *em)
{
	return !em->ops;
}

static inline int tcf_em_is_simple(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_SIMPLE;
}

static inline int tcf_em_is_inverted(struct tcf_ematch *em)
{
	return em->flags & TCF_EM_INVERT;
}

static inline int tcf_em_last_match(struct tcf_ematch *em)
{
	return (em->flags & TCF_EM_REL_MASK) == TCF_EM_REL_END;
}

static inline int tcf_em_early_end(struct tcf_ematch *em, int result)
{
	if (tcf_em_last_match(em))
		return 1;

	if (result == 0 && em->flags & TCF_EM_REL_AND)
		return 1;

	if (result != 0 && em->flags & TCF_EM_REL_OR)
		return 1;

	return 0;
}
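/* Illustrative sketch of how an evaluation loop can use tcf_em_early_end()
 * to short-circuit a flat tree of simple (non-container) ematches: a failed
 * match followed by an AND relation, or a successful match followed by an
 * OR relation, makes the remaining ematches irrelevant. This only mirrors
 * the idea behind __tcf_em_tree_match(), which additionally handles
 * container ematches and inversion.
 *
 *	int res = 0, i;
 *
 *	for (i = 0; i < tree->hdr.nmatches; i++) {
 *		struct tcf_ematch *em = &tree->matches[i];
 *
 *		res = em->ops->match(skb, em, info);
 *		if (tcf_em_early_end(em, res))
 *			break;
 *	}
 *	return res;
 */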
/**
 * struct tcf_ematch_tree - ematch tree handle
 *
 * @hdr: ematch tree header supplied by userspace
 * @matches: array of ematches
 */
struct tcf_ematch_tree {
	struct tcf_ematch_tree_hdr hdr;
	struct tcf_ematch *	matches;

};
/**
 * struct tcf_ematch_ops - ematch module operations
 *
 * @kind: identifier (kind) of this ematch module
 * @datalen: length of expected configuration data (optional)
 * @change: called during validation (optional)
 * @match: called during ematch tree evaluation, must return 1/0
 * @destroy: called during destruction (optional)
 * @dump: called during dumping process (optional)
 * @owner: owner, must be set to THIS_MODULE
 * @link: link to previous/next ematch module (internal use)
 */
struct tcf_ematch_ops {
	int			kind;
	int			datalen;
	int			(*change)(struct net *net, void *,
					  int, struct tcf_ematch *);
	int			(*match)(struct sk_buff *, struct tcf_ematch *,
					 struct tcf_pkt_info *);
	void			(*destroy)(struct tcf_ematch *);
	int			(*dump)(struct sk_buff *, struct tcf_ematch *);
	struct module		*owner;
	struct list_head	link;
};

int tcf_em_register(struct tcf_ematch_ops *);
void tcf_em_unregister(struct tcf_ematch_ops *);
int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
			 struct tcf_ematch_tree *);
void tcf_em_tree_destroy(struct tcf_ematch_tree *);
int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
			struct tcf_pkt_info *);
/**
 * tcf_em_tree_match - evaluate an ematch tree
 *
 * @skb: socket buffer of the packet in question
 * @tree: ematch tree to be used for evaluation
 * @info: packet information examined by classifier
 *
 * This function matches @skb against the ematch tree in @tree by walking
 * through all ematches, respecting their logical relations and returning
 * as soon as the result is determined.
 *
 * Returns 1 if the ematch tree as a whole matches, if no ematches are
 * configured, or if ematch support is not enabled in the kernel; otherwise
 * 0 is returned.
 */
static inline int tcf_em_tree_match(struct sk_buff *skb,
				    struct tcf_ematch_tree *tree,
				    struct tcf_pkt_info *info)
{
	if (tree->hdr.nmatches)
		return __tcf_em_tree_match(skb, tree, info);
	else
		return 1;
}
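/* Illustrative sketch (assumed TCA_MYCLS_EMATCHES attribute and f->ematches
 * field): a classifier validates the tree once at configuration time and
 * then calls tcf_em_tree_match() on the fast path, skipping filters whose
 * tree does not match.
 *
 * Configuration path:
 *	err = tcf_em_tree_validate(tp, tb[TCA_MYCLS_EMATCHES], &f->ematches);
 *	if (err < 0)
 *		return err;
 *
 * Classify path:
 *	if (!tcf_em_tree_match(skb, &f->ematches, NULL))
 *		continue;
 */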
#define MODULE_ALIAS_TCF_EMATCH(kind)	MODULE_ALIAS("ematch-kind-" __stringify(kind))

#else /* CONFIG_NET_EMATCH */

struct tcf_ematch_tree {
};

#define tcf_em_tree_validate(tp, tb, t) ((void)(t), 0)
#define tcf_em_tree_destroy(t) do { (void)(t); } while(0)
#define tcf_em_tree_dump(skb, t, tlv) (0)
#define tcf_em_tree_match(skb, t, info) ((void)(info), 1)

#endif /* CONFIG_NET_EMATCH */

static inline unsigned char * tcf_get_base_ptr(struct sk_buff *skb, int layer)
{
	switch (layer) {
		case TCF_LAYER_LINK:
			return skb_mac_header(skb);
		case TCF_LAYER_NETWORK:
			return skb_network_header(skb);
		case TCF_LAYER_TRANSPORT:
			return skb_transport_header(skb);
	}

	return NULL;
}

static inline int tcf_valid_offset(const struct sk_buff *skb,
				   const unsigned char *ptr, const int len)
{
	return likely((ptr + len) <= skb_tail_pointer(skb) &&
		      ptr >= skb->head &&
		      (ptr <= (ptr + len)));
}
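/* Illustrative sketch: the two helpers above are meant to be used together
 * when an ematch or classifier reads packet data at a configured offset.
 * The "offset" variable and the 16-bit load are assumptions for this
 * example only.
 *
 *	unsigned char *ptr = tcf_get_base_ptr(skb, TCF_LAYER_NETWORK) + offset;
 *
 *	if (!tcf_valid_offset(skb, ptr, sizeof(__be16)))
 *		return 0;
 *	val = get_unaligned_be16(ptr);
 */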
static inline int
tcf_change_indev(struct net *net, struct nlattr *indev_tlv,
		 struct netlink_ext_ack *extack)
{
	char indev[IFNAMSIZ];
	struct net_device *dev;

	if (nla_strscpy(indev, indev_tlv, IFNAMSIZ) < 0) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Interface name too long");
		return -EINVAL;
	}
	dev = __dev_get_by_name(net, indev);
	if (!dev) {
		NL_SET_ERR_MSG_ATTR(extack, indev_tlv,
				    "Network device not found");
		return -ENODEV;
	}
	return dev->ifindex;
}

static inline bool
tcf_match_indev(struct sk_buff *skb, int ifindex)
{
	if (!ifindex)
		return true;
	if (!skb->skb_iif)
		return false;
	return ifindex == skb->skb_iif;
}
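/* Illustrative sketch (assumed TCA_MYCLS_INDEV attribute and f->ifindex
 * field): tcf_change_indev() resolves the configured device name to an
 * ifindex at change() time, and tcf_match_indev() checks it per packet.
 *
 * At change() time:
 *	if (tb[TCA_MYCLS_INDEV]) {
 *		int ret = tcf_change_indev(net, tb[TCA_MYCLS_INDEV], extack);
 *
 *		if (ret < 0)
 *			return ret;
 *		f->ifindex = ret;
 *	}
 *
 * On the classify path:
 *	if (!tcf_match_indev(skb, f->ifindex))
 *		continue;
 */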
int tc_setup_offload_action(struct flow_action *flow_action,
			    const struct tcf_exts *exts,
			    struct netlink_ext_ack *extack);
void tc_cleanup_offload_action(struct flow_action *flow_action);
int tc_setup_action(struct flow_action *flow_action,
		    struct tc_action *actions[],
		    u32 miss_cookie_base,
		    struct netlink_ext_ack *extack);

int tc_setup_cb_call(struct tcf_block *block, enum tc_setup_type type,
		     void *type_data, bool err_stop, bool rtnl_held);
int tc_setup_cb_add(struct tcf_block *block, struct tcf_proto *tp,
		    enum tc_setup_type type, void *type_data, bool err_stop,
		    u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_replace(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *old_flags, unsigned int *old_in_hw_count,
			u32 *new_flags, unsigned int *new_in_hw_count,
			bool rtnl_held);
int tc_setup_cb_destroy(struct tcf_block *block, struct tcf_proto *tp,
			enum tc_setup_type type, void *type_data, bool err_stop,
			u32 *flags, unsigned int *in_hw_count, bool rtnl_held);
int tc_setup_cb_reoffload(struct tcf_block *block, struct tcf_proto *tp,
			  bool add, flow_setup_cb_t *cb,
			  enum tc_setup_type type, void *type_data,
			  void *cb_priv, u32 *flags, unsigned int *in_hw_count);
unsigned int tcf_exts_num_actions(struct tcf_exts *exts);

#ifdef CONFIG_NET_CLS_ACT
int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
		    enum flow_block_binder_type binder_type,
		    struct nlattr *block_index_attr,
		    struct netlink_ext_ack *extack);
void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
			       struct netlink_ext_ack *extack);
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret);
int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
#else
static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
				  enum flow_block_binder_type binder_type,
				  struct nlattr *block_index_attr,
				  struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch)
{
}

static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
					     struct netlink_ext_ack *extack)
{
	return 0;
}

static inline struct sk_buff *
tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
		  struct sk_buff **to_free, int *ret)
{
	return skb;
}

static inline int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe)
{
	return 0;
}
#endif
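/* Illustrative sketch of how a qdisc wires up a qevent block, loosely
 * following what sch_red does for its early_drop qevent; the qe_early_drop
 * field and the TCA_MYQDISC_EARLY_DROP_BLOCK attribute are assumptions made
 * for this example.
 *
 * At init/change time:
 *	err = tcf_qevent_init(&q->qe_early_drop, sch,
 *			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
 *			      tb[TCA_MYQDISC_EARLY_DROP_BLOCK], extack);
 *
 * At the point where the event fires (e.g. an early drop):
 *	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 *	if (!skb)
 *		return NET_XMIT_CN | ret;
 *
 * At dump/destroy time:
 *	tcf_qevent_dump(skb, TCA_MYQDISC_EARLY_DROP_BLOCK, &q->qe_early_drop);
 *	tcf_qevent_destroy(&q->qe_early_drop, sch);
 */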
struct tc_cls_u32_knode {
	struct tcf_exts *exts;
	struct tcf_result *res;
	struct tc_u32_sel *sel;
	u32 handle;
	u32 val;
	u32 mask;
	u32 link_handle;
	u8 fshift;
};

struct tc_cls_u32_hnode {
	u32 handle;
	u32 prio;
	unsigned int divisor;
};

enum tc_clsu32_command {
	TC_CLSU32_NEW_KNODE,
	TC_CLSU32_REPLACE_KNODE,
	TC_CLSU32_DELETE_KNODE,
	TC_CLSU32_NEW_HNODE,
	TC_CLSU32_REPLACE_HNODE,
	TC_CLSU32_DELETE_HNODE,
};

struct tc_cls_u32_offload {
	struct flow_cls_common_offload common;
	/* knode values */
	enum tc_clsu32_command command;
	union {
		struct tc_cls_u32_knode knode;
		struct tc_cls_u32_hnode hnode;
	};
};

static inline bool tc_can_offload(const struct net_device *dev)
{
	return dev->features & NETIF_F_HW_TC;
}

static inline bool tc_can_offload_extack(const struct net_device *dev,
					 struct netlink_ext_ack *extack)
{
	bool can = tc_can_offload(dev);

	if (!can)
		NL_SET_ERR_MSG(extack, "TC offload is disabled on net device");

	return can;
}

static inline bool
tc_cls_can_offload_and_chain0(const struct net_device *dev,
			      struct flow_cls_common_offload *common)
{
	if (!tc_can_offload_extack(dev, common->extack))
		return false;
	if (common->chain_index) {
		NL_SET_ERR_MSG(common->extack,
			       "Driver supports only offload of chain 0");
		return false;
	}
	return true;
}
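/* Illustrative sketch of the driver side: a flow block callback typically
 * uses tc_cls_can_offload_and_chain0() to reject requests when NETIF_F_HW_TC
 * is off or a non-zero chain is used. The mydrv_* names are assumptions.
 *
 *	static int mydrv_setup_tc_cls_flower(struct mydrv_priv *priv,
 *					     struct flow_cls_offload *f)
 *	{
 *		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return mydrv_flower_replace(priv, f);
 *		case FLOW_CLS_DESTROY:
 *			return mydrv_flower_destroy(priv, f);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */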
static inline bool tc_skip_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_HW) ? true : false;
}

static inline bool tc_skip_sw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_SKIP_SW) ? true : false;
}

/* SKIP_HW and SKIP_SW are mutually exclusive flags. */
static inline bool tc_flags_valid(u32 flags)
{
	if (flags & ~(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW |
		      TCA_CLS_FLAGS_VERBOSE))
		return false;

	flags &= TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW;
	if (!(flags ^ (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)))
		return false;

	return true;
}

static inline bool tc_in_hw(u32 flags)
{
	return (flags & TCA_CLS_FLAGS_IN_HW) ? true : false;
}

static inline void
tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common,
			   const struct tcf_proto *tp, u32 flags,
			   struct netlink_ext_ack *extack)
{
	cls_common->chain_index = tp->chain->index;
	cls_common->protocol = tp->protocol;
	cls_common->prio = tp->prio >> 16;
	if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE)
		cls_common->extack = extack;
}
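/* Illustrative sketch of the classifier side: before issuing a hardware
 * offload request, the common fields are filled in with
 * tc_cls_common_offload_init() and the request is pushed to the block
 * callbacks. This loosely follows cls_matchall; error handling and the
 * flow rule setup are omitted, and "head"/"cookie" are assumptions.
 *
 *	struct tc_cls_matchall_offload cls_mall = {};
 *	bool skip_sw = tc_skip_sw(head->flags);
 *
 *	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
 *	cls_mall.command = TC_CLSMATCHALL_REPLACE;
 *	cls_mall.cookie = cookie;
 *
 *	err = tc_setup_cb_add(block, tp, TC_SETUP_CLSMATCHALL, &cls_mall,
 *			      skip_sw, &head->flags, &head->in_hw_count, true);
 */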
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
static inline struct tc_skb_ext *tc_skb_ext_alloc(struct sk_buff *skb)
{
	struct tc_skb_ext *tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);

	if (tc_skb_ext)
		memset(tc_skb_ext, 0, sizeof(*tc_skb_ext));
	return tc_skb_ext;
}
#endif

enum tc_matchall_command {
	TC_CLSMATCHALL_REPLACE,
	TC_CLSMATCHALL_DESTROY,
	TC_CLSMATCHALL_STATS,
};

struct tc_cls_matchall_offload {
	struct flow_cls_common_offload common;
	enum tc_matchall_command command;
	struct flow_rule *rule;
	struct flow_stats stats;
	bool use_act_stats;
	unsigned long cookie;
};

enum tc_clsbpf_command {
	TC_CLSBPF_OFFLOAD,
	TC_CLSBPF_STATS,
};

struct tc_cls_bpf_offload {
	struct flow_cls_common_offload common;
	enum tc_clsbpf_command command;
	struct tcf_exts *exts;
	struct bpf_prog *prog;
	struct bpf_prog *oldprog;
	const char *name;
	bool exts_integrated;
};
/* This structure holds the cookie that is passed from userspace to the
 * kernel for actions and classifiers.
 */
struct tc_cookie {
	u8  *data;
	u32 len;
	struct rcu_head rcu;
};

struct tc_qopt_offload_stats {
	struct gnet_stats_basic_sync *bstats;
	struct gnet_stats_queue *qstats;
};

enum tc_mq_command {
	TC_MQ_CREATE,
	TC_MQ_DESTROY,
	TC_MQ_STATS,
	TC_MQ_GRAFT,
};

struct tc_mq_opt_offload_graft_params {
	unsigned long queue;
	u32 child_handle;
};

struct tc_mq_qopt_offload {
	enum tc_mq_command command;
	u32 handle;
	union {
		struct tc_qopt_offload_stats stats;
		struct tc_mq_opt_offload_graft_params graft_params;
	};
};

enum tc_htb_command {
	/* Root */
	TC_HTB_CREATE, /* Initialize HTB offload. */
	TC_HTB_DESTROY, /* Destroy HTB offload. */

	/* Classes */
	/* Allocate qid and create leaf. */
	TC_HTB_LEAF_ALLOC_QUEUE,
	/* Convert leaf to inner, preserve and return qid, create new leaf. */
	TC_HTB_LEAF_TO_INNER,
	/* Delete leaf, while siblings remain. */
	TC_HTB_LEAF_DEL,
	/* Delete leaf, convert parent to leaf, preserving qid. */
	TC_HTB_LEAF_DEL_LAST,
	/* TC_HTB_LEAF_DEL_LAST, but delete driver data on hardware errors. */
	TC_HTB_LEAF_DEL_LAST_FORCE,
	/* Modify parameters of a node. */
	TC_HTB_NODE_MODIFY,

	/* Class qdisc */
	TC_HTB_LEAF_QUERY_QUEUE, /* Query qid by classid. */
};

struct tc_htb_qopt_offload {
	struct netlink_ext_ack *extack;
	enum tc_htb_command command;
	u32 parent_classid;
	u16 classid;
	u16 qid;
	u32 quantum;
	u64 rate;
	u64 ceil;
	u8 prio;
};
#define TC_HTB_CLASSID_ROOT U32_MAX

enum tc_red_command {
	TC_RED_REPLACE,
	TC_RED_DESTROY,
	TC_RED_STATS,
	TC_RED_XSTATS,
	TC_RED_GRAFT,
};

struct tc_red_qopt_offload_params {
	u32 min;
	u32 max;
	u32 probability;
	u32 limit;
	bool is_ecn;
	bool is_harddrop;
	bool is_nodrop;
	struct gnet_stats_queue *qstats;
};

struct tc_red_qopt_offload {
	enum tc_red_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_red_qopt_offload_params set;
		struct tc_qopt_offload_stats stats;
		struct red_stats *xstats;
		u32 child_handle;
	};
};

enum tc_gred_command {
	TC_GRED_REPLACE,
	TC_GRED_DESTROY,
	TC_GRED_STATS,
};

struct tc_gred_vq_qopt_offload_params {
	bool present;
	u32 limit;
	u32 prio;
	u32 min;
	u32 max;
	bool is_ecn;
	bool is_harddrop;
	u32 probability;
	/* Only need backlog, see struct tc_prio_qopt_offload_params */
	u32 *backlog;
};

struct tc_gred_qopt_offload_params {
	bool grio_on;
	bool wred_on;
	unsigned int dp_cnt;
	unsigned int dp_def;
	struct gnet_stats_queue *qstats;
	struct tc_gred_vq_qopt_offload_params tab[MAX_DPs];
};

struct tc_gred_qopt_offload_stats {
	struct gnet_stats_basic_sync bstats[MAX_DPs];
	struct gnet_stats_queue qstats[MAX_DPs];
	struct red_stats *xstats[MAX_DPs];
};

struct tc_gred_qopt_offload {
	enum tc_gred_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_gred_qopt_offload_params set;
		struct tc_gred_qopt_offload_stats stats;
	};
};

enum tc_prio_command {
	TC_PRIO_REPLACE,
	TC_PRIO_DESTROY,
	TC_PRIO_STATS,
	TC_PRIO_GRAFT,
};

struct tc_prio_qopt_offload_params {
	int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	/* At the point of un-offloading the Qdisc, the reported backlog and
	 * qlen need to be reduced by the portion that is in HW.
	 */
	struct gnet_stats_queue *qstats;
};

struct tc_prio_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_prio_qopt_offload {
	enum tc_prio_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_prio_qopt_offload_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_prio_qopt_offload_graft_params graft_params;
	};
};

enum tc_root_command {
	TC_ROOT_GRAFT,
};

struct tc_root_qopt_offload {
	enum tc_root_command command;
	u32 handle;
	bool ingress;
};

enum tc_ets_command {
	TC_ETS_REPLACE,
	TC_ETS_DESTROY,
	TC_ETS_STATS,
	TC_ETS_GRAFT,
};

struct tc_ets_qopt_offload_replace_params {
	unsigned int bands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int quanta[TCQ_ETS_MAX_BANDS];	/* 0 for strict bands. */
	unsigned int weights[TCQ_ETS_MAX_BANDS];
	struct gnet_stats_queue *qstats;
};

struct tc_ets_qopt_offload_graft_params {
	u8 band;
	u32 child_handle;
};

struct tc_ets_qopt_offload {
	enum tc_ets_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_ets_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		struct tc_ets_qopt_offload_graft_params graft_params;
	};
};

enum tc_tbf_command {
	TC_TBF_REPLACE,
	TC_TBF_DESTROY,
	TC_TBF_STATS,
	TC_TBF_GRAFT,
};

struct tc_tbf_qopt_offload_replace_params {
	struct psched_ratecfg rate;
	u32 max_size;
	struct gnet_stats_queue *qstats;
};

struct tc_tbf_qopt_offload {
	enum tc_tbf_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_tbf_qopt_offload_replace_params replace_params;
		struct tc_qopt_offload_stats stats;
		u32 child_handle;
	};
};

enum tc_fifo_command {
	TC_FIFO_REPLACE,
	TC_FIFO_DESTROY,
	TC_FIFO_STATS,
};

struct tc_fifo_qopt_offload {
	enum tc_fifo_command command;
	u32 handle;
	u32 parent;
	union {
		struct tc_qopt_offload_stats stats;
	};
};

#ifdef CONFIG_NET_CLS_ACT
DECLARE_STATIC_KEY_FALSE(tc_skb_ext_tc);
void tc_skb_ext_tc_enable(void);
void tc_skb_ext_tc_disable(void);
#define tc_skb_ext_tc_enabled() static_branch_unlikely(&tc_skb_ext_tc)
#else /* CONFIG_NET_CLS_ACT */
static inline void tc_skb_ext_tc_enable(void) { }
static inline void tc_skb_ext_tc_disable(void) { }
#define tc_skb_ext_tc_enabled() false
#endif

#endif