xref: /linux/drivers/net/ethernet/marvell/prestera/prestera_flower.c (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */

#include "prestera.h"
#include "prestera_acl.h"
#include "prestera_flow.h"
#include "prestera_flower.h"
#include "prestera_matchall.h"

struct prestera_flower_template {
	struct prestera_acl_ruleset *ruleset;
	struct list_head list;
	u32 chain_index;
};

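/* Drop the ruleset reference held by the template, unlink the template
 * from the flow block's template list and free it.
 */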
static void
prestera_flower_template_free(struct prestera_flower_template *template)
{
	prestera_acl_ruleset_put(template->ruleset);
	list_del(&template->list);
	kfree(template);
}

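/* Free all templates attached to the flow block, releasing the ruleset
 * references taken when the templates were created.
 */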
void prestera_flower_template_cleanup(struct prestera_flow_block *block)
{
	struct prestera_flower_template *template, *tmp;

	/* put the references to all rulesets kept by template create */
	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		prestera_flower_template_free(template);
}

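/* Translate a FLOW_ACTION_GOTO entry into a hardware jump: resolve the
 * target chain into a ruleset and record its index in the rule's engine
 * arguments. Only forward jumps are allowed, and a rule may carry at
 * most one jump.
 */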
static int
prestera_flower_parse_goto_action(struct prestera_flow_block *block,
				  struct prestera_acl_rule *rule,
				  u32 chain_index,
				  const struct flow_action_entry *act)
{
	struct prestera_acl_ruleset *ruleset;

	if (act->chain_index <= chain_index)
		/* we can jump only forward */
		return -EINVAL;

	if (rule->re_arg.jump.valid)
		return -EEXIST;

	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   act->chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule->re_arg.jump.valid = 1;
	rule->re_arg.jump.i.index = prestera_acl_ruleset_index_get(ruleset);

	rule->jump_ruleset = ruleset;

	return 0;
}

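/* Translate the flower action list into rule->re_arg. A hardware
 * counter is bound first when delayed HW stats are requested; each
 * supported action (accept, drop, trap, police, goto) may appear at
 * most once per rule.
 */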
static int prestera_flower_parse_actions(struct prestera_flow_block *block,
					 struct prestera_acl_rule *rule,
					 struct flow_action *flow_action,
					 u32 chain_index,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int err, i;

	/* the whole struct (rule->re_arg) must be zero-initialized */
	if (!flow_action_has_entries(flow_action))
		return 0;

	if (!flow_action_mixed_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	act = flow_action_first_entry_get(flow_action);
	if (act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED) {
		/* Nothing to do */
	} else if (act->hw_stats & FLOW_ACTION_HW_STATS_DELAYED) {
		/* setup counter first */
		rule->re_arg.count.valid = true;
		err = prestera_acl_chain_to_client(chain_index, block->ingress,
						   &rule->re_arg.count.client);
		if (err)
			return err;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported action HW stats type");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			if (rule->re_arg.accept.valid)
				return -EEXIST;

			rule->re_arg.accept.valid = 1;
			break;
		case FLOW_ACTION_DROP:
			if (rule->re_arg.drop.valid)
				return -EEXIST;

			rule->re_arg.drop.valid = 1;
			break;
		case FLOW_ACTION_TRAP:
			if (rule->re_arg.trap.valid)
				return -EEXIST;

			rule->re_arg.trap.valid = 1;
			break;
		case FLOW_ACTION_POLICE:
			if (rule->re_arg.police.valid)
				return -EEXIST;

			rule->re_arg.police.valid = 1;
			rule->re_arg.police.rate =
				act->police.rate_bytes_ps;
			rule->re_arg.police.burst = act->police.burst;
			rule->re_arg.police.ingress = block->ingress;
			break;
		case FLOW_ACTION_GOTO:
			err = prestera_flower_parse_goto_action(block, rule,
								chain_index,
								act);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			pr_err("Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

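/* Handle the FLOW_DISSECTOR_KEY_META key: resolve the ingress ifindex
 * to a prestera port and match on its hardware port and device IDs.
 */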
static int prestera_flower_parse_meta(struct prestera_acl_rule *rule,
				      struct flow_cls_offload *f,
				      struct prestera_flow_block *block)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct prestera_acl_match *r_match = &rule->re_key.match;
	struct prestera_port *port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;
	__be16 key, mask;

	flow_rule_match_meta(f_rule, &match);

	if (match.mask->l2_miss) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\"");
		return -EOPNOTSUPP;
	}

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!prestera_netdev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack,
				   "Can't match on non-switchdev ingress port");
		return -EINVAL;
	}
	port = netdev_priv(ingress_dev);

	mask = htons(0x1FFF << 3);
	key = htons(port->hw_id << 3);
	rule_match_set(r_match->key, SYS_PORT, key);
	rule_match_set(r_match->mask, SYS_PORT, mask);

	mask = htons(0x3FF);
	key = htons(port->dev_id);
	rule_match_set(r_match->key, SYS_DEV, key);
	rule_match_set(r_match->mask, SYS_DEV, mask);

	return 0;
}

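/* Translate a flower classifier into a prestera ACL rule: reject
 * unsupported dissector keys, fill in the match key/mask pairs and
 * parse the action list.
 */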
static int prestera_flower_parse(struct prestera_flow_block *block,
				 struct prestera_acl_rule *rule,
				 struct flow_cls_offload *f)
{
	struct flow_rule *f_rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = f_rule->match.dissector;
	struct prestera_acl_match *r_match = &rule->re_key.match;
	__be16 n_proto_mask = 0;
	__be16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_META) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_PORTS_RANGE) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	prestera_acl_rule_priority_set(rule, f->common.prio);

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_META)) {
		err = prestera_flower_parse_meta(rule, f, block);
		if (err)
			return err;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(f_rule, &match);
		addr_type = match.key->addr_type;

		if (flow_rule_has_control_flags(match.mask->flags,
						f->common.extack))
			return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(f_rule, &match);
		n_proto_key = match.key->n_proto;
		n_proto_mask = match.mask->n_proto;

		if (ntohs(match.key->n_proto) == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}

		rule_match_set(r_match->key, ETH_TYPE, n_proto_key);
		rule_match_set(r_match->mask, ETH_TYPE, n_proto_mask);

		rule_match_set(r_match->key, IP_PROTO, match.key->ip_proto);
		rule_match_set(r_match->mask, IP_PROTO, match.mask->ip_proto);
		ip_proto = match.key->ip_proto;
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(f_rule, &match);

		/* DA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_DMAC_0, &match.key->dst[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_DMAC_1, &match.key->dst[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_DMAC_0, &match.mask->dst[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_DMAC_1, &match.mask->dst[4], 2);

		/* SA key, mask */
		rule_match_set_n(r_match->key,
				 ETH_SMAC_0, &match.key->src[0], 4);
		rule_match_set_n(r_match->key,
				 ETH_SMAC_1, &match.key->src[4], 2);

		rule_match_set_n(r_match->mask,
				 ETH_SMAC_0, &match.mask->src[0], 4);
		rule_match_set_n(r_match->mask,
				 ETH_SMAC_1, &match.mask->src[4], 2);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(f_rule, &match);

		rule_match_set(r_match->key, IP_SRC, match.key->src);
		rule_match_set(r_match->mask, IP_SRC, match.mask->src);

		rule_match_set(r_match->key, IP_DST, match.key->dst);
		rule_match_set(r_match->mask, IP_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
			NL_SET_ERR_MSG_MOD(f->common.extack,
					   "Only UDP and TCP keys are supported");
			return -EINVAL;
		}

		flow_rule_match_ports(f_rule, &match);

		rule_match_set(r_match->key, L4_PORT_SRC, match.key->src);
		rule_match_set(r_match->mask, L4_PORT_SRC, match.mask->src);

		rule_match_set(r_match->key, L4_PORT_DST, match.key->dst);
		rule_match_set(r_match->mask, L4_PORT_DST, match.mask->dst);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_PORTS_RANGE)) {
		struct flow_match_ports_range match;
		__be32 tp_key, tp_mask;

		flow_rule_match_ports_range(f_rule, &match);

		/* src port range (min, max) */
		tp_key = htonl(ntohs(match.key->tp_min.src) |
			       (ntohs(match.key->tp_max.src) << 16));
		tp_mask = htonl(ntohs(match.mask->tp_min.src) |
				(ntohs(match.mask->tp_max.src) << 16));
		rule_match_set(r_match->key, L4_PORT_RANGE_SRC, tp_key);
		rule_match_set(r_match->mask, L4_PORT_RANGE_SRC, tp_mask);

		/* dst port range (min, max) */
		tp_key = htonl(ntohs(match.key->tp_min.dst) |
			       (ntohs(match.key->tp_max.dst) << 16));
		tp_mask = htonl(ntohs(match.mask->tp_min.dst) |
				(ntohs(match.mask->tp_max.dst) << 16));
		rule_match_set(r_match->key, L4_PORT_RANGE_DST, tp_key);
		rule_match_set(r_match->mask, L4_PORT_RANGE_DST, tp_mask);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(f_rule, &match);

		if (match.mask->vlan_id != 0) {
			__be16 key = cpu_to_be16(match.key->vlan_id);
			__be16 mask = cpu_to_be16(match.mask->vlan_id);

			rule_match_set(r_match->key, VLAN_ID, key);
			rule_match_set(r_match->mask, VLAN_ID, mask);
		}

		rule_match_set(r_match->key, VLAN_TPID, match.key->vlan_tpid);
		rule_match_set(r_match->mask, VLAN_TPID, match.mask->vlan_tpid);
	}

	if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_match_icmp match;

		flow_rule_match_icmp(f_rule, &match);

		rule_match_set(r_match->key, ICMP_TYPE, match.key->type);
		rule_match_set(r_match->mask, ICMP_TYPE, match.mask->type);

		rule_match_set(r_match->key, ICMP_CODE, match.key->code);
		rule_match_set(r_match->mask, ICMP_CODE, match.mask->code);
	}

	return prestera_flower_parse_actions(block, rule, &f->rule->action,
					     f->common.chain_index,
					     f->common.extack);
}

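/* Enforce ordering against installed matchall filters: on ingress a
 * flower filter must not be added in front of existing matchall rules,
 * on egress it must not be added behind them.
 */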
static int prestera_flower_prio_check(struct prestera_flow_block *block,
				      struct flow_cls_offload *f)
{
	u32 mall_prio_min;
	u32 mall_prio_max;
	int err;

	err = prestera_mall_prio_get(block, &mall_prio_min, &mall_prio_max);
	if (err == -ENOENT)
		/* No matchall filters installed on this chain. */
		return 0;

	if (err) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to get matchall priorities");
		return err;
	}

	if (f->common.prio <= mall_prio_max && block->ingress) {
		NL_SET_ERR_MSG(f->common.extack,
			       "Failed to add in front of existing matchall rules");
		return -EOPNOTSUPP;
	}
	if (f->common.prio >= mall_prio_min && !block->ingress) {
		NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing matchall rules");
		return -EOPNOTSUPP;
	}

	return 0;
}

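/* Report the minimum and maximum priorities of the flower rules
 * installed in the ruleset bound to the given chain.
 */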
int prestera_flower_prio_get(struct prestera_flow_block *block, u32 chain_index,
			     u32 *prio_min, u32 *prio_max)
{
	struct prestera_acl_ruleset *ruleset;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block, chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	prestera_acl_ruleset_prio_get(ruleset, prio_min, prio_max);
	return 0;
}

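/* FLOW_CLS_REPLACE handler: check the priority ordering, take the
 * chain's ruleset, build a rule from the flower filter and install it
 * in hardware. The ruleset reference taken here is dropped again on
 * exit; the rule keeps its own reference while it exists.
 */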
int prestera_flower_replace(struct prestera_flow_block *block,
			    struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl *acl = block->sw->acl;
	struct prestera_acl_rule *rule;
	int err;

	err = prestera_flower_prio_check(block, f);
	if (err)
		return err;

	ruleset = prestera_acl_ruleset_get(acl, block, f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	/* increments the ruleset reference */
	rule = prestera_acl_rule_create(ruleset, f->cookie,
					f->common.chain_index);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	err = prestera_flower_parse(block, rule, f);
	if (err)
		goto err_rule_add;

	if (!prestera_acl_ruleset_is_offload(ruleset)) {
		err = prestera_acl_ruleset_offload(ruleset);
		if (err)
			goto err_ruleset_offload;
	}

	err = prestera_acl_rule_add(block->sw, rule);
	if (err)
		goto err_rule_add;

	prestera_acl_ruleset_put(ruleset);
	return 0;

err_ruleset_offload:
err_rule_add:
	prestera_acl_rule_destroy(rule);
err_rule_create:
	prestera_acl_ruleset_put(ruleset);
	return err;
}

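/* FLOW_CLS_DESTROY handler: look up the rule by the filter cookie,
 * remove it from hardware and destroy it.
 */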
void prestera_flower_destroy(struct prestera_flow_block *block,
			     struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return;

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (rule) {
		prestera_acl_rule_del(block->sw, rule);
		prestera_acl_rule_destroy(rule);
	}
	prestera_acl_ruleset_put(ruleset);
}

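/* FLOW_CLS_TMPLT_CREATE handler: parse the template into an on-stack
 * rule to derive its keymask, set that keymask on the chain's ruleset
 * and keep a reference to the ruleset until the template is destroyed.
 */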
int prestera_flower_tmplt_create(struct prestera_flow_block *block,
				 struct flow_cls_offload *f)
{
	struct prestera_flower_template *template;
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule rule;
	int err;

	memset(&rule, 0, sizeof(rule));
	err = prestera_flower_parse(block, &rule, f);
	if (err)
		return err;

	template = kmalloc(sizeof(*template), GFP_KERNEL);
	if (!template) {
		err = -ENOMEM;
		goto err_malloc;
	}

	prestera_acl_rule_keymask_pcl_id_set(&rule, 0);
	ruleset = prestera_acl_ruleset_get(block->sw->acl, block,
					   f->common.chain_index);
	if (IS_ERR_OR_NULL(ruleset)) {
		err = -EINVAL;
		goto err_ruleset_get;
	}

	/* save the template's keymask in this ruleset */
	err = prestera_acl_ruleset_keymask_set(ruleset, rule.re_key.match.mask);
	if (err)
		goto err_ruleset_keymask_set;

	/* Ignore the error, as it is not possible to reject a template
	 * operation; keep the reference to the ruleset so that rules can
	 * be added to it later. If the offload fails here, the ruleset
	 * will be offloaded again when a new rule is added. It is also
	 * unlikely that the ruleset is already offloaded at this stage.
	 */
	prestera_acl_ruleset_offload(ruleset);

	/* keep the reference to the ruleset */
	template->ruleset = ruleset;
	template->chain_index = f->common.chain_index;
	list_add_rcu(&template->list, &block->template_list);
	return 0;

err_ruleset_keymask_set:
	prestera_acl_ruleset_put(ruleset);
err_ruleset_get:
	kfree(template);
err_malloc:
	NL_SET_ERR_MSG_MOD(f->common.extack, "Create chain template failed");
	return err;
}

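/* FLOW_CLS_TMPLT_DESTROY handler: free the template created for the
 * given chain, dropping its ruleset reference.
 */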
void prestera_flower_tmplt_destroy(struct prestera_flow_block *block,
				   struct flow_cls_offload *f)
{
	struct prestera_flower_template *template, *tmp;

	list_for_each_entry_safe(template, tmp, &block->template_list, list)
		if (template->chain_index == f->common.chain_index) {
			/* put the reference to the ruleset kept in create */
			prestera_flower_template_free(template);
			return;
		}
}

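/* FLOW_CLS_STATS handler: fetch the packet/byte counters of the rule
 * matching the filter cookie and report them as delayed HW stats.
 */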
int prestera_flower_stats(struct prestera_flow_block *block,
			  struct flow_cls_offload *f)
{
	struct prestera_acl_ruleset *ruleset;
	struct prestera_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = prestera_acl_ruleset_lookup(block->sw->acl, block,
					      f->common.chain_index);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = prestera_acl_rule_lookup(ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_get_stats;
	}

	err = prestera_acl_rule_get_stats(block->sw->acl, rule, &packets,
					  &bytes, &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);

err_rule_get_stats:
	prestera_acl_ruleset_put(ruleset);
	return err;
}