// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

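/* Build a push_vlan action from the TC VLAN action. The TCI is packed from
 * the TC priority and VLAN ID, with NFP_FL_PUSH_VLAN_CFI always set.
 * Illustrative example, assuming the standard 802.1Q TCI layout for the
 * NFP_FL_PUSH_VLAN_* fields defined elsewhere in the driver: prio 3 and
 * vid 100 give tci = (3 << 13) | (1 << 12) | 100 = 0x7064.
 */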
static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

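/* Prepend a pre_lag action when the mirred egress device is a LAG master.
 * Returns the number of bytes added to the action list, 0 if the egress
 * device is not a LAG master, or a negative errno on failure.
 */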
static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first on the action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

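/* Fill an output action for one egress device. Three cases are handled:
 * egress over a tunnel (port encodes the tunnel type, and only one tunnel
 * output is allowed per flow), egress to a LAG master (port encodes the
 * LAG group id), and egress to a representor on the same NFP (port is the
 * representor's port id). The last flag marks the final output action.
 */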
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

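/* Map the destination UDP port of a tunnel_set action to an NFP tunnel
 * type. Geneve is only selected when the firmware advertises support.
 * NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT are defined elsewhere in the
 * driver, presumably the IANA-assigned defaults (4789 and 6081).
 */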
static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first on the action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

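/* Convert the TC Geneve options into push_geneve actions. A first pass
 * validates the option count and total action size; a second pass writes
 * the options into the action list in reverse order, as hardware expects.
 * For example, one option with opt->length == 1 (a single 4-byte data
 * word) consumes sizeof(struct nfp_fl_push_geneve) + 4 bytes.
 */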
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
				opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

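/* Build the set_ipv4_udp_tun action that drives tunnel encap. The TTL is
 * taken from the TC tunnel key when set, otherwise derived from a route
 * lookup to the tunnel destination, falling back to the netns default.
 * TUNNEL_KEY must be requested and only the flags in
 * NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS are accepted.
 */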
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if it fails then use
		 * the default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

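/* Merge a 32-bit set-field request into the accumulated exact/mask pair at
 * p_exact/p_mask. Worked example: with a stored exact/mask of
 * 0x00001122/0x0000ffff, a new value of 0xaabbccdd under mask 0xff000000
 * yields exact 0xaa001122 and mask 0xff00ffff.
 */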
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

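/* Translate a pedit set of an IPv4 address into the set_ip4_addrs action.
 * The pedit offset is a byte offset into the IP header, so saddr matches
 * at offset 12 and daddr at offset 16.
 */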
static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask = mask;
		set_ip_addr->ipv4_dst = exact;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask = mask;
		set_ip_addr->ipv4_src = exact;
		break;
	default:
		return -EOPNOTSUPP;
	}

	set_ip_addr->reserved = cpu_to_be16(0);
	set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
	set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[idx % 4].mask = mask;
	ip6->ipv6[idx % 4].exact = exact;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

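/* Translate a pedit set of an IPv6 address into set_ipv6_addr actions.
 * Byte offsets 8-23 of the IPv6 header fall within saddr and offsets 24-39
 * within daddr; each 32-bit pedit key fills one word of the address,
 * selected by idx % 4.
 */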
static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src)
{
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr))
		return -EOPNOTSUPP;
	else if (off < offsetof(struct ipv6hdr, daddr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx,
				      exact, mask, ip_src);
	else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr))
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx,
				      exact, mask, ip_dst);
	else
		return -EOPNOTSUPP;

	return 0;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force a proto match;
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

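/* Walk every pedit key of the action, coalescing the keys into at most one
 * set action per header type, then append the populated set actions to
 * nfp_action. csum_updated accumulates the checksums that hardware fixes
 * automatically, so a later csum action can be validated against it.
 */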
static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	} else if (set_ip_addr.head.len_lw) {
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * but the hardware requires this to be two separate actions.
		 */
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_tport.head.len_lw) {
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

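/* Append an output action for a mirred redirect or mirror. The action is
 * rejected if an accumulated csum update is still outstanding or if the
 * action list would overflow. With firmware LAG support, a pre_lag action
 * may additionally be inserted at the head of the action list.
 */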
static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns -err or size of prelag action added.
		 * This will be 0 if it is not egressing to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

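/* Translate a single TC action into its NFP action list equivalent.
 * Unsupported actions return -EOPNOTSUPP so the flow is not offloaded.
 */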
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* csum action requests recalc of something we have not fixed */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list, which will later be used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

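/* Compile the TC action list of @flow into NFP actions in
 * @nfp_flow->action_data and record the total length in the flow metadata.
 * The shortcut is downgraded to ACT_NULL once the list holds more than one
 * action.
 */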
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise when the action list is small; this unfortunately
	 * cannot happen once we have more than one action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}