xref: /linux/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c (revision 397692eab35cbbd83681880c6a2dbcdb9fd84386)
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_block *block,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct flow_action *flow_action,
					 struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	int mirror_act_count = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei, extack);
	if (err)
		return err;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_ACCEPT:
			err = mlxsw_sp_acl_rulei_act_terminate(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append terminate action");
				return err;
			}
			break;
		case FLOW_ACTION_DROP: {
			bool ingress;

			if (mlxsw_sp_acl_block_is_mixed_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Drop action is not supported when block is bound to ingress and egress");
				return -EOPNOTSUPP;
			}
			ingress = mlxsw_sp_acl_block_is_ingress_bound(block);
			err = mlxsw_sp_acl_rulei_act_drop(rulei, ingress,
							  act->cookie, extack);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append drop action");
				return err;
			}

			/* Forbid block with this rulei to be bound
			 * to ingress/egress in future. Ingress rule is
			 * a blocker for egress and vice versa.
			 */
			if (ingress)
				rulei->egress_bind_blocker = 1;
			else
				rulei->ingress_bind_blocker = 1;
			}
			break;
		case FLOW_ACTION_TRAP:
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append trap action");
				return err;
			}
			break;
		case FLOW_ACTION_GOTO: {
			u32 chain_index = act->chain_index;
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, block,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err) {
				NL_SET_ERR_MSG_MOD(extack, "Cannot append jump action");
				return err;
			}
			}
			break;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

			if (mlxsw_sp_acl_block_is_egress_bound(block)) {
				NL_SET_ERR_MSG_MOD(extack, "Redirect action is not supported on egress");
				return -EOPNOTSUPP;
			}

			/* Forbid block with this rulei to be bound
			 * to egress in future.
			 */
			rulei->egress_bind_blocker = 1;

			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index, extack);
			if (err)
				return err;

			out_dev = act->dev;
			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev, extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_MIRRED: {
			struct net_device *out_dev = act->dev;

			if (mirror_act_count++) {
				NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
				return -EOPNOTSUPP;
			}

			err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
							    block, out_dev,
							    extack);
			if (err)
				return err;
			}
			break;
		case FLOW_ACTION_VLAN_MANGLE: {
			u16 proto = be16_to_cpu(act->vlan.proto);
			u8 prio = act->vlan.prio;
			u16 vid = act->vlan.vid;

			return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							   act->id, vid,
							   proto, prio, extack);
			}
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}
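
To make the dispatch structure above easier to follow, here is a minimal userspace sketch of the same parse-and-reject pattern: walk a list of requested actions, translate the ones the backend understands, and fail the whole rule with -EOPNOTSUPP on the first one it does not. The enum, struct and function names below are illustrative stand-ins, not the kernel's flow_action API.

#include <stdio.h>
#include <errno.h>

/* Illustrative stand-ins for the kernel's flow_action entries. */
enum example_action_id {
	EX_ACTION_ACCEPT,
	EX_ACTION_DROP,
	EX_ACTION_TRAP,
	EX_ACTION_POLICE,	/* deliberately unsupported below */
};

struct example_action_entry {
	enum example_action_id id;
};

static int example_parse_actions(const struct example_action_entry *acts,
				 int count)
{
	int i;

	for (i = 0; i < count; i++) {
		switch (acts[i].id) {
		case EX_ACTION_ACCEPT:
			printf("append terminate action\n");
			break;
		case EX_ACTION_DROP:
			printf("append drop action\n");
			break;
		case EX_ACTION_TRAP:
			printf("append trap action\n");
			break;
		default:
			/* First unsupported action fails the whole rule. */
			fprintf(stderr, "unsupported action %d\n", acts[i].id);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

int main(void)
{
	const struct example_action_entry acts[] = {
		{ EX_ACTION_ACCEPT },
		{ EX_ACTION_POLICE },
	};

	return example_parse_actions(acts, 2) ? 1 : 0;
}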

static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
				      struct flow_cls_offload *f,
				      struct mlxsw_sp_acl_block *block)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
		return -EINVAL;
	}

	ingress_dev = __dev_get_by_index(block->net,
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
		return -EINVAL;
	}

	if (!mlxsw_sp_port_dev_check(ingress_dev)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
		return -EINVAL;
	}

	mlxsw_sp_port = netdev_priv(ingress_dev);
	if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei,
				       MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
				       mlxsw_sp_port->local_port,
				       0xFFFFFFFF);
	return 0;
}
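
The meta key is the one place where this parser resolves a netdevice itself: the ifindex from the match is looked up with __dev_get_by_index() and must be an exact (all-ones mask) match on a port of the same ASIC, because the hardware key holds a concrete system port, not a wildcard. A small userspace analogue of that lookup-then-validate shape, using if_nametoindex() in place of the in-kernel lookup (the interface name "lo" is just an example):

#include <stdio.h>
#include <stdint.h>
#include <net/if.h>

int main(void)
{
	uint32_t ifindex_mask = 0xFFFFFFFF;	/* exact match required */
	unsigned int ifindex;

	/* Stand-in for __dev_get_by_index(): resolve a device by name. */
	ifindex = if_nametoindex("lo");
	if (!ifindex) {
		perror("if_nametoindex");
		return 1;
	}

	/* Like the driver, refuse anything but a full ifindex mask. */
	if (ifindex_mask != 0xFFFFFFFF) {
		fprintf(stderr, "unsupported ingress ifindex mask\n");
		return 1;
	}

	printf("would match on ingress ifindex %u\n", ifindex);
	return 0;
}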

static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv4_addrs match;

	flow_rule_match_ipv4_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       (char *) &match.key->src,
				       (char *) &match.mask->src, 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       (char *) &match.key->dst,
				       (char *) &match.mask->dst, 4);
}

static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f)
{
	struct flow_match_ipv6_addrs match;

	flow_rule_match_ipv6_addrs(f->rule, &match);

	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127,
				       &match.key->src.s6_addr[0x0],
				       &match.mask->src.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95,
				       &match.key->src.s6_addr[0x4],
				       &match.mask->src.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63,
				       &match.key->src.s6_addr[0x8],
				       &match.mask->src.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31,
				       &match.key->src.s6_addr[0xC],
				       &match.mask->src.s6_addr[0xC], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127,
				       &match.key->dst.s6_addr[0x0],
				       &match.mask->dst.s6_addr[0x0], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95,
				       &match.key->dst.s6_addr[0x4],
				       &match.mask->dst.s6_addr[0x4], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63,
				       &match.key->dst.s6_addr[0x8],
				       &match.mask->dst.s6_addr[0x8], 4);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31,
				       &match.key->dst.s6_addr[0xC],
				       &match.mask->dst.s6_addr[0xC], 4);
}
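
The flex-key elements are at most 32 bits wide, so the 128-bit IPv6 address is programmed as four 4-byte slices taken at s6_addr offsets 0x0, 0x4, 0x8 and 0xC, most-significant slice first. A self-contained demonstration of that slicing (2001:db8::1 is just a documentation-range address):

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	static const char *elem[] = { "96_127", "64_95", "32_63", "0_31" };
	struct in6_addr addr;
	int i;

	if (inet_pton(AF_INET6, "2001:db8::1", &addr) != 1)
		return 1;

	/* Same four 4-byte slices the driver feeds to the flex keys. */
	for (i = 0; i < 4; i++) {
		const unsigned char *p = &addr.s6_addr[4 * i];

		printf("SRC_IP_%-6s = %02x%02x%02x%02x\n", elem[i],
		       p[0], p[1], p[2], p[3]);
	}
	return 0;
}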

static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct flow_cls_offload *f,
				       u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ports match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "Only UDP and TCP keys are supported");
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	flow_rule_match_ports(rule, &match);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(match.key->dst),
				       ntohs(match.mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(match.key->src),
				       ntohs(match.mask->src));
	return 0;
}
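
flow_match_ports carries the L4 ports in network byte order, while the keymask helper takes host-order integers, hence the ntohs() on both key and mask. A two-line refresher on what that conversion does (port 443 is an arbitrary example):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	uint16_t dst_be = htons(443);	/* as it sits in the packet/match */

	/* On a little-endian host the raw and converted values differ. */
	printf("raw 0x%04x, host order %u\n", dst_be, ntohs(dst_be));
	return 0;
}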

static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct flow_cls_offload *f,
				     u8 ip_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_tcp match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP keys supported only for TCP");
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	flow_rule_match_tcp(rule, &match);

	if (match.mask->flags & htons(0x0E00)) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "TCP flags match not supported on reserved bits");
		dev_err(mlxsw_sp->bus_info->dev, "TCP flags match not supported on reserved bits\n");
		return -EINVAL;
	}

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(match.key->flags),
				       ntohs(match.mask->flags));
	return 0;
}
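
The flags field of the TCP match is a big-endian 16-bit quantity covering TCP header bytes 12-13: data offset sits in 0xF000, the three reserved bits in 0x0E00, and NS down through FIN in 0x01FF. The htons(0x0E00) test above therefore rejects any rule whose mask touches the reserved bits, which the hardware key cannot represent. A userspace sketch of the same test, with a deliberately bad mask:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define TCP_FLAGS_RESERVED 0x0E00	/* bits between doff and NS */

int main(void)
{
	/* Example mask asking for SYN (0x0002) plus one reserved bit. */
	uint16_t flags_mask = htons(0x0202);

	if (flags_mask & htons(TCP_FLAGS_RESERVED)) {
		fprintf(stderr, "mask touches reserved TCP flag bits\n");
		return 1;
	}
	printf("mask ok\n");
	return 0;
}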

static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct flow_cls_offload *f,
				    u16 n_proto)
{
	const struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_match_ip match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		NL_SET_ERR_MSG_MOD(f->common.extack, "IP keys supported only for IPv4/6");
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	flow_rule_match_ip(rule, &match);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       match.key->ttl, match.mask->ttl);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       match.key->tos & 0x3,
				       match.mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       match.key->tos >> 2,
				       match.mask->tos >> 2);

	return 0;
}
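
The TOS octet (IPv4) or traffic-class octet (IPv6) packs DSCP into the upper six bits and ECN into the lower two, which is why key and mask are split with >> 2 and & 0x3 before being handed to separate flex-key elements. A one-liner demonstrating the split, using the common EF marking with ECT(1) as the example value:

#include <stdio.h>

int main(void)
{
	unsigned int tos = 0xb9;	/* DSCP 46 (EF) + ECN 1 */

	/* Same split the driver performs on both key and mask. */
	printf("dscp = %u, ecn = %u\n", tos >> 2, tos & 0x3);
	return 0;
}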

static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported key");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	err = mlxsw_sp_flower_parse_meta(rulei, f, block);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		n_proto_key = ntohs(match.key->n_proto);
		n_proto_mask = ntohs(match.mask->n_proto);

		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = match.key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       match.key->ip_proto,
					       match.mask->ip_proto);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_32_47,
					       match.key->dst,
					       match.mask->dst, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC_0_31,
					       match.key->dst + 2,
					       match.mask->dst + 2, 4);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_32_47,
					       match.key->src,
					       match.mask->src, 2);
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC_0_31,
					       match.key->src + 2,
					       match.mask->src + 2, 4);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (mlxsw_sp_acl_block_is_egress_bound(block)) {
			NL_SET_ERR_MSG_MOD(f->common.extack, "vlan_id key is not supported on egress");
			return -EOPNOTSUPP;
		}

		/* Forbid block with this rulei to be bound
		 * to egress in future.
		 */
		rulei->egress_bind_blocker = 1;

		if (match.mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       match.key->vlan_id,
						       match.mask->vlan_id);
		if (match.mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       match.key->vlan_priority,
						       match.mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f, n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, block, rulei,
					     &f->rule->action,
					     f->common.extack);
}
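
The used_keys test at the top of this function is a standard capability gate: the dissector sets one bit per key the filter actually used, so ANDing with the complement of the supported set flags any key the driver has no parser for. A compact userspace rendering of the idiom, with a made-up key enum standing in for FLOW_DISSECTOR_KEY_*:

#include <stdio.h>
#include <errno.h>

#define BIT(nr) (1U << (nr))

/* Made-up dissector keys, standing in for FLOW_DISSECTOR_KEY_*. */
enum { KEY_CONTROL, KEY_BASIC, KEY_PORTS, KEY_MPLS };

static int check_used_keys(unsigned int used_keys)
{
	unsigned int supported = BIT(KEY_CONTROL) |
				 BIT(KEY_BASIC) |
				 BIT(KEY_PORTS);

	/* Any bit outside the supported set fails the whole rule. */
	if (used_keys & ~supported)
		return -EOPNOTSUPP;
	return 0;
}

int main(void)
{
	printf("basic+ports: %d\n",
	       check_used_keys(BIT(KEY_BASIC) | BIT(KEY_PORTS)));
	printf("mpls:        %d\n", check_used_keys(BIT(KEY_MPLS)));
	return 0;
}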

int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_acl_block *block,
			    struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL,
					f->common.extack);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}
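
The replace path follows the kernel's goto-unwind convention: resources are acquired top to bottom, each failure jumps to a label that releases exactly what was acquired before it, and the labels run in reverse acquisition order. A minimal userspace sketch of the shape, with malloc() standing in for the ruleset/rule acquisitions (unlike the function above, the success path here simply frees everything):

#include <stdio.h>
#include <stdlib.h>

static int replace_like(void)
{
	void *ruleset, *rule;
	int err;

	ruleset = malloc(64);		/* stand-in for ruleset_get() */
	if (!ruleset)
		return -1;

	rule = malloc(64);		/* stand-in for rule_create() */
	if (!rule) {
		err = -1;
		goto err_rule_create;
	}

	/* parse/commit/add steps would go here; each failure would
	 * jump to a label below that unwinds everything so far.
	 */

	free(rule);
	free(ruleset);
	return 0;

err_rule_create:
	free(ruleset);
	return err;
}

int main(void)
{
	return replace_like() ? 1 : 0;
}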

void mlxsw_sp_flower_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_block *block,
			     struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}

int mlxsw_sp_flower_stats(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_block *block,
			  struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule)
		return -EINVAL;

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	flow_stats_update(&f->stats, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

int mlxsw_sp_flower_tmplt_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_block *block,
				 struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info rulei;
	int err;

	memset(&rulei, 0, sizeof(rulei));
	err = mlxsw_sp_flower_parse(mlxsw_sp, block, &rulei, f);
	if (err)
		return err;
	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER,
					   &rulei.values.elusage);

	/* keep the reference to the ruleset */
	return PTR_ERR_OR_ZERO(ruleset);
}
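
mlxsw_sp_acl_ruleset_get() reports failure through the pointer itself, and PTR_ERR_OR_ZERO() collapses that into the int this function must return. The kernel encodes small negative errnos in the last 4095 values of the address space; a simplified userspace re-creation of that encoding, assuming an ordinary flat address space:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified userspace copies of the kernel's err.h helpers. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	/* Error pointers live in the last 4095 values of the space. */
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? (int)PTR_ERR(ptr) : 0;
}

int main(void)
{
	int ok_storage;
	void *ok = &ok_storage;
	void *bad = ERR_PTR(-ENOMEM);

	printf("ok  -> %d\n", PTR_ERR_OR_ZERO(ok));
	printf("bad -> %d\n", PTR_ERR_OR_ZERO(bad));
	return 0;
}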

void mlxsw_sp_flower_tmplt_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_block *block,
				   struct flow_cls_offload *f)
{
	struct mlxsw_sp_acl_ruleset *ruleset;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, block,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER, NULL);
	if (IS_ERR(ruleset))
		return;
	/* Two puts are intentional: the first drops the reference taken
	 * by the lookup above, the second drops the reference kept in
	 * mlxsw_sp_flower_tmplt_create().
	 */
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
}