/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>

#include "spectrum.h"
#include "core_acl_flex_keys.h"

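/* Translate the TC action list attached to a flower filter into mlxsw ACL
 * rule actions. A count action is always inserted first so that statistics
 * can later be read back for the rule; any action the hardware cannot
 * express fails the offload with -EOPNOTSUPP.
 */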
static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
					 struct net_device *dev, bool ingress,
					 struct mlxsw_sp_acl_rule_info *rulei,
					 struct tcf_exts *exts)
{
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return 0;

	/* Count action is inserted first */
	err = mlxsw_sp_acl_rulei_act_count(mlxsw_sp, rulei);
	if (err)
		return err;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_ok(a)) {
			err = mlxsw_sp_acl_rulei_act_continue(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_shot(a)) {
			err = mlxsw_sp_acl_rulei_act_drop(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_trap(a)) {
			err = mlxsw_sp_acl_rulei_act_trap(rulei);
			if (err)
				return err;
		} else if (is_tcf_gact_goto_chain(a)) {
			u32 chain_index = tcf_gact_goto_chain_index(a);
			struct mlxsw_sp_acl_ruleset *ruleset;
			u16 group_id;

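			/* goto chain is offloaded as a jump to the ACL
			 * group backing the target chain's ruleset.
			 */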
			ruleset = mlxsw_sp_acl_ruleset_lookup(mlxsw_sp, dev,
							      ingress,
							      chain_index,
							      MLXSW_SP_ACL_PROFILE_FLOWER);
			if (IS_ERR(ruleset))
				return PTR_ERR(ruleset);

			group_id = mlxsw_sp_acl_ruleset_group_id(ruleset);
			err = mlxsw_sp_acl_rulei_act_jump(rulei, group_id);
			if (err)
				return err;
		} else if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlxsw_sp_fid *fid;
			u16 fid_index;

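			/* Set the driver's dummy FID before the forward
			 * action so that redirected packets carry a valid
			 * FID through the pipeline.
			 */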
			fid = mlxsw_sp_acl_dummy_fid(mlxsw_sp);
			fid_index = mlxsw_sp_fid_index(fid);
			err = mlxsw_sp_acl_rulei_act_fid_set(mlxsw_sp, rulei,
							     fid_index);
			if (err)
				return err;

			out_dev = tcf_mirred_dev(a);
			if (out_dev == dev)
				out_dev = NULL;

			err = mlxsw_sp_acl_rulei_act_fwd(mlxsw_sp, rulei,
							 out_dev);
			if (err)
				return err;
		} else if (is_tcf_vlan(a)) {
			u16 proto = be16_to_cpu(tcf_vlan_push_proto(a));
			u32 action = tcf_vlan_action(a);
			u8 prio = tcf_vlan_push_prio(a);
			u16 vid = tcf_vlan_push_vid(a);

			err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
							  action, vid,
							  proto, prio);
			if (err)
				return err;
		} else {
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported action\n");
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

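/* Extract IPv4 source/destination addresses and masks from the flower
 * match and encode them as 32-bit key elements.
 */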
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv4_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv4_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
					  f->mask);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_IP4,
				       ntohl(key->src), ntohl(mask->src));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_IP4,
				       ntohl(key->dst), ntohl(mask->dst));
}

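/* Extract IPv6 source/destination addresses and masks from the flower
 * match into the flexible key's HI/LO buffer elements.
 */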
static void mlxsw_sp_flower_parse_ipv6(struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f)
{
	struct flow_dissector_key_ipv6_addrs *key =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->key);
	struct flow_dissector_key_ipv6_addrs *mask =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
					  f->mask);
	size_t addr_half_size = sizeof(key->src) / 2;

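	/* Each 128-bit address is split in two: the upper eight bytes go
	 * into the _HI element, the lower eight into the _LO element.
	 */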
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_HI,
				       &key->src.s6_addr[0],
				       &mask->src.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP6_LO,
				       &key->src.s6_addr[addr_half_size],
				       &mask->src.s6_addr[addr_half_size],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_HI,
				       &key->dst.s6_addr[0],
				       &mask->dst.s6_addr[0],
				       addr_half_size);
	mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP6_LO,
				       &key->dst.s6_addr[addr_half_size],
				       &mask->dst.s6_addr[addr_half_size],
				       addr_half_size);
}

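/* Parse L4 source/destination ports. Port keys are only meaningful for
 * TCP and UDP, so any other IP protocol is rejected with -EINVAL.
 */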
static int mlxsw_sp_flower_parse_ports(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_rule_info *rulei,
				       struct tc_cls_flower_offload *f,
				       u8 ip_proto)
{
	struct flow_dissector_key_ports *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS))
		return 0;

	if (ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		dev_err(mlxsw_sp->bus_info->dev, "Only UDP and TCP keys are supported\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_PORTS,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_PORTS,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_DST_L4_PORT,
				       ntohs(key->dst), ntohs(mask->dst));
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_SRC_L4_PORT,
				       ntohs(key->src), ntohs(mask->src));
	return 0;
}

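/* Parse TCP flags. The TCP key is rejected unless the filter also
 * matches on ip_proto TCP.
 */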
static int mlxsw_sp_flower_parse_tcp(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_rule_info *rulei,
				     struct tc_cls_flower_offload *f,
				     u8 ip_proto)
{
	struct flow_dissector_key_tcp *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP))
		return 0;

	if (ip_proto != IPPROTO_TCP) {
		dev_err(mlxsw_sp->bus_info->dev, "TCP keys supported only for TCP\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_TCP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_TCP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_TCP_FLAGS,
				       ntohs(key->flags), ntohs(mask->flags));
	return 0;
}

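/* Parse IP header fields (TTL/hop limit and ToS). These keys are only
 * accepted for IPv4/IPv6 EtherTypes.
 */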
static int mlxsw_sp_flower_parse_ip(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_acl_rule_info *rulei,
				    struct tc_cls_flower_offload *f,
				    u16 n_proto)
{
	struct flow_dissector_key_ip *key, *mask;

	if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP))
		return 0;

	if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6) {
		dev_err(mlxsw_sp->bus_info->dev, "IP keys supported only for IPv4/6\n");
		return -EINVAL;
	}

	key = skb_flow_dissector_target(f->dissector,
					FLOW_DISSECTOR_KEY_IP,
					f->key);
	mask = skb_flow_dissector_target(f->dissector,
					 FLOW_DISSECTOR_KEY_IP,
					 f->mask);
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_TTL_,
				       key->ttl, mask->ttl);

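	/* The ToS byte is split into its ECN (low two bits) and DSCP
	 * (high six bits) components, which are matched separately.
	 */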
	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_ECN,
				       key->tos & 0x3, mask->tos & 0x3);

	mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_IP_DSCP,
				       key->tos >> 2, mask->tos >> 2);

	return 0;
}

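/* Translate a flower classifier into an mlxsw ACL rule: validate the set
 * of dissector keys, encode each supported match into the rule info and
 * finally parse the attached actions.
 */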
static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *dev, bool ingress,
				 struct mlxsw_sp_acl_rule_info *rulei,
				 struct tc_cls_flower_offload *f)
{
	u16 n_proto_mask = 0;
	u16 n_proto_key = 0;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	int err;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported key\n");
		return -EOPNOTSUPP;
	}

	mlxsw_sp_acl_rulei_priority(rulei, f->common.prio);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		n_proto_key = ntohs(key->n_proto);
		n_proto_mask = ntohs(mask->n_proto);

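		/* ETH_P_ALL means "match any EtherType"; encode it as a
		 * zero key and mask so the element is a don't-care.
		 */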
		if (n_proto_key == ETH_P_ALL) {
			n_proto_key = 0;
			n_proto_mask = 0;
		}
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_ETHERTYPE,
					       n_proto_key, n_proto_mask);

		ip_proto = key->ip_proto;
		mlxsw_sp_acl_rulei_keymask_u32(rulei,
					       MLXSW_AFK_ELEMENT_IP_PROTO,
					       key->ip_proto, mask->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_DMAC,
					       key->dst, mask->dst,
					       sizeof(key->dst));
		mlxsw_sp_acl_rulei_keymask_buf(rulei,
					       MLXSW_AFK_ELEMENT_SMAC,
					       key->src, mask->src,
					       sizeof(key->src));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_VID,
						       key->vlan_id,
						       mask->vlan_id);
		if (mask->vlan_priority != 0)
			mlxsw_sp_acl_rulei_keymask_u32(rulei,
						       MLXSW_AFK_ELEMENT_PCP,
						       key->vlan_priority,
						       mask->vlan_priority);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		mlxsw_sp_flower_parse_ipv4(rulei, f);

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS)
		mlxsw_sp_flower_parse_ipv6(rulei, f);

	err = mlxsw_sp_flower_parse_ports(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;
	err = mlxsw_sp_flower_parse_tcp(mlxsw_sp, rulei, f, ip_proto);
	if (err)
		return err;

	err = mlxsw_sp_flower_parse_ip(mlxsw_sp, rulei, f,
				       n_proto_key & n_proto_mask);
	if (err)
		return err;

	return mlxsw_sp_flower_parse_actions(mlxsw_sp, dev, ingress,
					     rulei, f->exts);
}

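/* Offload a new or replaced flower filter. The ruleset is looked up (or
 * created) for the filter's chain, the filter is parsed into a rule keyed
 * by the TC cookie, and the rule is committed and installed in hardware.
 * On any failure the rule and the ruleset reference are rolled back.
 */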
int mlxsw_sp_flower_replace(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			    struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_acl_rule_info *rulei;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, dev, ingress,
					   f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return PTR_ERR(ruleset);

	rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_rule_create;
	}

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flower_parse(mlxsw_sp, dev, ingress, rulei, f);
	if (err)
		goto err_flower_parse;

	err = mlxsw_sp_acl_rulei_commit(rulei);
	if (err)
		goto err_rulei_commit;

	err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule);
	if (err)
		goto err_rule_add;

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_port->acl_rule_count++;
	return 0;

err_rule_add:
err_rulei_commit:
err_flower_parse:
	mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
err_rule_create:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}

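/* Remove an offloaded flower filter. The rule is looked up by its TC
 * cookie, deleted from hardware and destroyed, and the ruleset reference
 * taken at replace time is released.
 */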
void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			     struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress, f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (IS_ERR(ruleset))
		return;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (rule) {
		mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
		mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
	}

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	mlxsw_sp_port->acl_rule_count--;
}

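/* Fetch hardware counters for an offloaded flower filter and propagate
 * packets, bytes and last-use time back to TC via tcf_exts_stats_update().
 */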
int mlxsw_sp_flower_stats(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
			  struct tc_cls_flower_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule *rule;
	u64 packets;
	u64 lastuse;
	u64 bytes;
	int err;

	ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
					   ingress, f->common.chain_index,
					   MLXSW_SP_ACL_PROFILE_FLOWER);
	if (WARN_ON(IS_ERR(ruleset)))
		return -EINVAL;

	rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
	if (!rule) {
		err = -EINVAL;
		goto err_rule_lookup;
	}

	err = mlxsw_sp_acl_rule_get_stats(mlxsw_sp, rule, &packets, &bytes,
					  &lastuse);
	if (err)
		goto err_rule_get_stats;

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return 0;

err_rule_get_stats:
err_rule_lookup:
	mlxsw_sp_acl_ruleset_put(mlxsw_sp, ruleset);
	return err;
}