xref: /linux/drivers/net/ethernet/netronome/nfp/flower/action.c (revision c32e64e852f3f5c0fd709f84bc94736840088375)
/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)
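/* E.g. TUNNEL_KEY is cpu_to_be16(0x04) in the kernel headers, matching
 * NFP_FL_TUNNEL_KEY above; the BUILD_BUG_ON() in
 * nfp_fl_set_ipv4_udp_tun() verifies this mirroring at compile time.
 */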

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

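/* The 16-bit TCI built below follows the 802.1Q layout: PCP (3 bits),
 * CFI/DEI (1 bit, always set for pushed tags here) and VID (12 bits),
 * packed via the NFP_FL_PUSH_VLAN_* field masks in the FIELD_PREP()
 * calls.
 */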
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

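/* Returns the size of the pre-LAG action added, 0 when out_dev is not a
 * LAG master, or a negative errno on failure.
 */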
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

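/* Map the tunnel_key set action's destination UDP port to an NFP tunnel
 * type; Geneve is only selected when the FW advertises support for it.
 */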
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

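/* A worked example of the two-pass scheme below: for options A, B, C in
 * that order in the tunnel metadata, the first loop only counts them and
 * sums the push action sizes; the second loop walks A, B, C again but
 * writes each push action at a descending offset, so the action list
 * ends up ordered C, B, A as the HW expects.
 */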
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
			       opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

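/* Build the set tunnel action. When the rule does not specify a TTL, a
 * route lookup on the tunnel destination supplies one, falling back to
 * the netns default TTL if no route is found.
 */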
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if it fails then use
		 * the default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

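/* Merge a 32-bit set into the exact/mask pair at p_exact/p_mask. E.g.
 * value 0xAABBCCDD with mask 0x0000FFFF replaces only the low 16 bits
 * of *p_exact (with 0xCCDD) and ORs those bits into *p_mask; bits set
 * by earlier pedit keys outside the mask are preserved.
 */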
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

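/* Pedit writes 32 bits at a time, so off + 4 must stay within the
 * 12 byte dst + src MAC region at the start of the Ethernet header.
 */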
static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

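/* An IPv6 address is treated as four 32-bit words; the offset into
 * struct ipv6hdr selects whether a pedit key touches saddr or daddr and
 * which word of that address it modifies.
 */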
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr)) {
		return -EOPNOTSUPP;
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

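/* Map a flow's IP protocol to the TC csum update flags that HW will fix
 * implicitly when the corresponding header is rewritten.
 */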
static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force proto match;
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

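/* Compile a TC pedit action into NFP set actions. Each pedit key is
 * first folded into a per-header staging struct; the populated structs
 * are then appended to the action list in a fixed order (eth, IPv4,
 * IPv6, transport), while *csum_updated accumulates the checksums HW
 * fixes as a side effect of the rewrites.
 */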
static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size = 0;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	}
	if (set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action;
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

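/* Emit an output action for a redirect/mirror, prepending a pre-LAG
 * action when the FW has negotiated the LAG feature and the egress
 * device is a LAG master.
 */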
static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

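/* Translate a single TC action into its NFP equivalent, appending the
 * result to nfp_fl->action_data at offset *a_len.
 */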
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* csum action requests recalc of something we have not fixed */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list, which will later be used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

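/* Walk every TC action of a flower rule and build the FW action list in
 * nfp_flow->action_data. A shortcut set by an individual action is only
 * kept when the rule compiles to a single action.
 */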
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise when the action list is small; this unfortunately
	 * cannot be done once there is more than one action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}