/* xref: /linux/drivers/net/ethernet/netronome/nfp/flower/action.c
 * (revision 77380998d91dee8aafdbe42634776ba1ef692f1e)
 */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <net/geneve.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "cmsg.h"
#include "main.h"
#include "../nfp_net_repr.h"

/* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
 * to change. Such changes will break our FW ABI.
 */
#define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
						 NFP_FL_TUNNEL_KEY | \
						 NFP_FL_TUNNEL_GENEVE_OPT)

static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
{
	size_t act_size = sizeof(struct nfp_fl_pop_vlan);

	pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
	pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	pop_vlan->reserved = 0;
}

static void
nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
		 const struct tc_action *action)
{
	size_t act_size = sizeof(struct nfp_fl_push_vlan);
	u16 tmp_push_vlan_tci;

	push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
	push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
	push_vlan->reserved = 0;
	push_vlan->vlan_tpid = tcf_vlan_push_proto(action);

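	/* Build the TCI from the TC action: priority and VID are placed
	 * via FIELD_PREP and the CFI bit is set, per the firmware's
	 * push-VLAN TCI layout.
	 */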
	tmp_push_vlan_tci =
		FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
		FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action)) |
		NFP_FL_PUSH_VLAN_CFI;
	push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}

static int
nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
	       struct nfp_fl_payload *nfp_flow, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_lag);
	struct nfp_fl_pre_lag *pre_lag;
	struct net_device *out_dev;
	int err;

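	/* A pre-lag action is only needed when egressing to a LAG master;
	 * returning 0 here means no action bytes were added.
	 */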
	out_dev = tcf_mirred_dev(action);
	if (!out_dev || !netif_is_lag_master(out_dev))
		return 0;

	if (act_len + act_size > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Pre_lag action must be first in the action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(nfp_flow->action_data + act_size,
			nfp_flow->action_data, act_len);

	pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
	err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
	if (err)
		return err;

	pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
	pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	return act_size;
}

static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
					 enum nfp_flower_tun_type tun_type)
{
	if (!out_dev->rtnl_link_ops)
		return false;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
		return tun_type == NFP_FL_TUNNEL_VXLAN;

	if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
		return tun_type == NFP_FL_TUNNEL_GENEVE;

	return false;
}

static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

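		/* Only a single tunnel output action is supported per flow. */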
		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}

static enum nfp_flower_tun_type
nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
				const struct tc_action *action)
{
	struct ip_tunnel_info *tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;

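	/* Infer the tunnel type from the well-known UDP destination port
	 * carried in the tunnel_key set action.
	 */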
	switch (tun->key.tp_dst) {
	case htons(NFP_FL_VXLAN_PORT):
		return NFP_FL_TUNNEL_VXLAN;
	case htons(NFP_FL_GENEVE_PORT):
		if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
			return NFP_FL_TUNNEL_GENEVE;
		/* FALLTHROUGH */
	default:
		return NFP_FL_TUNNEL_NONE;
	}
}

static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
{
	size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
	struct nfp_fl_pre_tunnel *pre_tun_act;

	/* Pre_tunnel action must be first in the action list.
	 * If other actions already exist they need to be pushed forward.
	 */
	if (act_len)
		memmove(act_data + act_size, act_data, act_len);

	pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;

	memset(pre_tun_act, 0, act_size);

	pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
	pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	return pre_tun_act;
}

static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
			       opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

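		/* Walk the options forward in source order but write each
		 * push action at a decreasing offset, so the last TC option
		 * lands first in the action list.
		 */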
		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}

static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently we support only one pre-tunnel, so the index is always 0. */
	int pretun_idx = 0;

	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if it fails then use
		 * the default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so it must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
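		/* Geneve carries Ethernet frames (ETH_P_TEB); the option
		 * length is expressed in 4-byte words.
		 */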
		set_tun->tun_proto = htons(ETH_P_TEB);
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}

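/* Merge a 32-bit set action into an exact/mask pair: bits covered by
 * @mask take the new @value, bits outside it keep their previous
 * setting, and the mask accumulates across calls.
 */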
static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
{
	u32 oldvalue = get_unaligned((u32 *)p_exact);
	u32 oldmask = get_unaligned((u32 *)p_mask);

	value &= mask;
	value |= oldvalue & ~mask;

	put_unaligned(oldmask | mask, (u32 *)p_mask);
	put_unaligned(value, (u32 *)p_exact);
}

static int
nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_eth *set_eth)
{
	u32 exact, mask;

	if (off + 4 > ETH_ALEN * 2)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
			    &set_eth->eth_addr_mask[off]);

	set_eth->reserved = cpu_to_be16(0);
	set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
	set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;

	return 0;
}

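/* Mirrors the 32-bit word of struct iphdr that starts at the ttl field,
 * so a pedit write at that offset can be decomposed into its parts.
 */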
struct ipv4_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

static int
nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ip4_addrs *set_ip_addr,
	       struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos)
{
	struct ipv4_ttl_word *ttl_word_mask;
	struct ipv4_ttl_word *ttl_word;
	struct iphdr *tos_word_mask;
	struct iphdr *tos_word;
	__be32 exact, mask;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	switch (off) {
	case offsetof(struct iphdr, daddr):
		set_ip_addr->ipv4_dst_mask |= mask;
		set_ip_addr->ipv4_dst &= ~mask;
		set_ip_addr->ipv4_dst |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, saddr):
		set_ip_addr->ipv4_src_mask |= mask;
		set_ip_addr->ipv4_src &= ~mask;
		set_ip_addr->ipv4_src |= exact & mask;
		set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
		set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >>
					   NFP_FL_LW_SIZ;
		break;
	case offsetof(struct iphdr, ttl):
		ttl_word_mask = (struct ipv4_ttl_word *)&mask;
		ttl_word = (struct ipv4_ttl_word *)&exact;

		if (ttl_word_mask->protocol || ttl_word_mask->check)
			return -EOPNOTSUPP;

		set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl;
		set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	case round_down(offsetof(struct iphdr, tos), 4):
		tos_word_mask = (struct iphdr *)&mask;
		tos_word = (struct iphdr *)&exact;

		if (tos_word_mask->version || tos_word_mask->ihl ||
		    tos_word_mask->tot_len)
			return -EOPNOTSUPP;

		set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos;
		set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos;
		set_ip_ttl_tos->head.jump_id =
			NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS;
		set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >>
					      NFP_FL_LW_SIZ;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static void
nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
		      struct nfp_fl_set_ipv6_addr *ip6)
{
	ip6->ipv6[word].mask |= mask;
	ip6->ipv6[word].exact &= ~mask;
	ip6->ipv6[word].exact |= exact & mask;

	ip6->reserved = cpu_to_be16(0);
	ip6->head.jump_id = opcode_tag;
	ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
}

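/* Mirrors the 32-bit word of struct ipv6hdr that starts at payload_len,
 * covering payload_len, nexthdr and hop_limit.
 */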
struct ipv6_hop_limit_word {
	__be16 payload_len;
	u8 nexthdr;
	u8 hop_limit;
};

static int
nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask,
				    struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
	struct ipv6_hop_limit_word *fl_hl_mask;
	struct ipv6_hop_limit_word *fl_hl;

	switch (off) {
	case offsetof(struct ipv6hdr, payload_len):
		fl_hl_mask = (struct ipv6_hop_limit_word *)&mask;
		fl_hl = (struct ipv6_hop_limit_word *)&exact;

		if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len)
			return -EOPNOTSUPP;

		ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit;
		ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit &
					    fl_hl_mask->hop_limit;
		break;
	case round_down(offsetof(struct ipv6hdr, flow_lbl), 4):
		if (mask & ~IPV6_FLOW_LABEL_MASK ||
		    exact & ~IPV6_FLOW_LABEL_MASK)
			return -EOPNOTSUPP;

		ip_hl_fl->ipv6_label_mask |= mask;
		ip_hl_fl->ipv6_label &= ~mask;
		ip_hl_fl->ipv6_label |= exact & mask;
		break;
	}

	ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL;
	ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ;

	return 0;
}

static int
nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
	       struct nfp_fl_set_ipv6_addr *ip_dst,
	       struct nfp_fl_set_ipv6_addr *ip_src,
	       struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl)
{
	__be32 exact, mask;
	int err = 0;
	u8 word;

	/* We are expecting tcf_pedit to return a big endian value */
	mask = (__force __be32)~tcf_pedit_mask(action, idx);
	exact = (__force __be32)tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	if (off < offsetof(struct ipv6hdr, saddr)) {
		err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask,
							  ip_hl_fl);
	} else if (off < offsetof(struct ipv6hdr, daddr)) {
		word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
				      exact, mask, ip_src);
	} else if (off < offsetof(struct ipv6hdr, daddr) +
		       sizeof(struct in6_addr)) {
		word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
		nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
				      exact, mask, ip_dst);
	} else {
		return -EOPNOTSUPP;
	}

	return err;
}

static int
nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
		 struct nfp_fl_set_tport *set_tport, int opcode)
{
	u32 exact, mask;

	if (off)
		return -EOPNOTSUPP;

	mask = ~tcf_pedit_mask(action, idx);
	exact = tcf_pedit_val(action, idx);

	if (exact & ~mask)
		return -EOPNOTSUPP;

	nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
			    set_tport->tp_port_mask);

	set_tport->reserved = cpu_to_be16(0);
	set_tport->head.jump_id = opcode;
	set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;

	return 0;
}

static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
{
	switch (ip_proto) {
	case 0:
		/* Filter doesn't force a proto match;
		 * both TCP and UDP will be updated if encountered
		 */
		return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
	case IPPROTO_TCP:
		return TCA_CSUM_UPDATE_FLAG_TCP;
	case IPPROTO_UDP:
		return TCA_CSUM_UPDATE_FLAG_UDP;
	default:
		/* All other protocols will be ignored by FW */
		return 0;
	}
}

static int
nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
	     char *nfp_action, int *a_len, u32 *csum_updated)
{
	struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
	struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl;
	struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos;
	struct nfp_fl_set_ip4_addrs set_ip_addr;
	struct nfp_fl_set_tport set_tport;
	struct nfp_fl_set_eth set_eth;
	enum pedit_header_type htype;
	int idx, nkeys, err;
	size_t act_size = 0;
	u32 offset, cmd;
	u8 ip_proto = 0;

	memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl));
	memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos));
	memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
	memset(&set_ip6_src, 0, sizeof(set_ip6_src));
	memset(&set_ip_addr, 0, sizeof(set_ip_addr));
	memset(&set_tport, 0, sizeof(set_tport));
	memset(&set_eth, 0, sizeof(set_eth));
	nkeys = tcf_pedit_nkeys(action);

	for (idx = 0; idx < nkeys; idx++) {
		cmd = tcf_pedit_cmd(action, idx);
		htype = tcf_pedit_htype(action, idx);
		offset = tcf_pedit_offset(action, idx);

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
			return -EOPNOTSUPP;

		switch (htype) {
		case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
			err = nfp_fl_set_eth(action, idx, offset, &set_eth);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
			err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr,
					     &set_ip_ttl_tos);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
			err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
					     &set_ip6_src, &set_ip6_tc_hl_fl);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_TCP);
			break;
		case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
			err = nfp_fl_set_tport(action, idx, offset, &set_tport,
					       NFP_FL_ACTION_OPCODE_SET_UDP);
			break;
		default:
			return -EOPNOTSUPP;
		}
		if (err)
			return err;
	}

	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *basic;

		basic = skb_flow_dissector_target(flow->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  flow->key);
		ip_proto = basic->ip_proto;
	}

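	/* Copy each populated set action into the action list in a fixed
	 * order, advancing nfp_action past every action already written.
	 */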
	if (set_eth.head.len_lw) {
		act_size = sizeof(set_eth);
		memcpy(nfp_action, &set_eth, act_size);
		*a_len += act_size;
	}
	if (set_ip_ttl_tos.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_ttl_tos);
		memcpy(nfp_action, &set_ip_ttl_tos, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip_addr.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip_addr);
		memcpy(nfp_action, &set_ip_addr, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
		*csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
				nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_tc_hl_fl.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_tc_hl_fl);
		memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
		/* TC compiles set src and dst IPv6 address as a single action,
		 * the hardware requires this to be 2 separate actions.
		 */
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		act_size = sizeof(set_ip6_dst);
		memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
		       act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_dst.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_dst);
		memcpy(nfp_action, &set_ip6_dst, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	} else if (set_ip6_src.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_ip6_src);
		memcpy(nfp_action, &set_ip6_src, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}
	if (set_tport.head.len_lw) {
		nfp_action += act_size;
		act_size = sizeof(set_tport);
		memcpy(nfp_action, &set_tport, act_size);
		*a_len += act_size;

		/* Hardware will automatically fix TCP/UDP checksum. */
		*csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
	}

	return 0;
}

static int
nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
			 struct nfp_fl_payload *nfp_fl, int *a_len,
			 struct net_device *netdev, bool last,
			 enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
			 int *out_cnt, u32 *csum_updated)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_fl_output *output;
	int err, prelag_size;

	/* If csum_updated has not been reset by now, it means HW will
	 * incorrectly update csums when they are not requested.
	 */
	if (*csum_updated)
		return -EOPNOTSUPP;

	if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
	err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
			    tun_out_cnt);
	if (err)
		return err;

	*a_len += sizeof(struct nfp_fl_output);

	if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		/* nfp_fl_pre_lag returns a negative error or the size of the
		 * pre-lag action added. This will be 0 if it is not egressing
		 * to a lag dev.
		 */
		prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
		if (prelag_size < 0)
			return prelag_size;
		else if (prelag_size > 0 && (!last || *out_cnt))
			return -EOPNOTSUPP;

		*a_len += prelag_size;
	}
	(*out_cnt)++;

	return 0;
}

static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* csum action requests recalc of something we have not fixed */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list, which will later be used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}

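/* Illustrative example only (device names are placeholders): a rule
 * exercising the VLAN push and redirect paths above could be added with
 *
 *   tc filter add dev <repr0> parent ffff: protocol ip flower \
 *       action vlan push id 100 \
 *       action mirred egress redirect dev <repr1>
 */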
int nfp_flower_compile_action(struct nfp_app *app,
			      struct tc_cls_flower_offload *flow,
			      struct net_device *netdev,
			      struct nfp_fl_payload *nfp_flow)
{
	int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
	enum nfp_flower_tun_type tun_type;
	const struct tc_action *a;
	u32 csum_updated = 0;

	memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
	nfp_flow->meta.act_len = 0;
	tun_type = NFP_FL_TUNNEL_NONE;
	act_len = 0;
	act_cnt = 0;
	tun_out_cnt = 0;
	out_cnt = 0;

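	/* Walk the TC action list, translating each action in turn and
	 * appending its firmware representation to action_data.
	 */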
	tcf_exts_for_each_action(i, a, flow->exts) {
		err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
					     netdev, &tun_type, &tun_out_cnt,
					     &out_cnt, &csum_updated);
		if (err)
			return err;
		act_cnt++;
	}

	/* We optimise when the action list is small; this unfortunately
	 * cannot happen once there is more than one action in the list.
	 */
	if (act_cnt > 1)
		nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

	nfp_flow->meta.act_len = act_len;

	return 0;
}