xref: /linux/drivers/net/ethernet/netronome/nfp/flower/conntrack.c (revision 0c7c237b1c35011ef0b8d30c1d5c20bc6ae7b69b)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2021 Corigine, Inc. */
3 
4 #include <net/tc_act/tc_csum.h>
5 #include <net/tc_act/tc_ct.h>
6 
7 #include "conntrack.h"
8 #include "../nfp_port.h"
9 
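/* The tc merge table is keyed on the {pre_ct cookie, post_ct cookie} pair
 * stored in nfp_fl_ct_tc_merge->cookie, hence a key of two unsigned longs
 * (see nfp_ct_do_tc_merge() below).
 */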
10 const struct rhashtable_params nfp_tc_ct_merge_params = {
11 	.head_offset		= offsetof(struct nfp_fl_ct_tc_merge,
12 					   hash_node),
13 	.key_len		= sizeof(unsigned long) * 2,
14 	.key_offset		= offsetof(struct nfp_fl_ct_tc_merge, cookie),
15 	.automatic_shrinking	= true,
16 };
17 
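/* The nft merge table is keyed on the tc merge cookie pair plus the nft
 * cookie stored in nfp_fl_nft_tc_merge->cookie, hence a key of three
 * unsigned longs (see nfp_ct_do_nft_merge() below).
 */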
18 const struct rhashtable_params nfp_nft_ct_merge_params = {
19 	.head_offset		= offsetof(struct nfp_fl_nft_tc_merge,
20 					   hash_node),
21 	.key_len		= sizeof(unsigned long) * 3,
22 	.key_offset		= offsetof(struct nfp_fl_nft_tc_merge, cookie),
23 	.automatic_shrinking	= true,
24 };
25 
26 static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
27 					      enum flow_action_id act_id);
28 
29 /**
30  * get_hashentry() - Wrapper around hashtable lookup.
31  * @ht:		hashtable where entry could be found
32  * @key:	key to lookup
33  * @params:	hashtable params
34  * @size:	size of entry to allocate if not in table
35  *
36  * Returns an entry from a hashtable. If the entry does not exist
37  * yet, allocate the memory for it and return the new entry.
38  */
39 static void *get_hashentry(struct rhashtable *ht, void *key,
40 			   const struct rhashtable_params params, size_t size)
41 {
42 	void *result;
43 
44 	result = rhashtable_lookup_fast(ht, key, params);
45 
46 	if (result)
47 		return result;
48 
49 	result = kzalloc(size, GFP_KERNEL);
50 	if (!result)
51 		return ERR_PTR(-ENOMEM);
52 
53 	return result;
54 }
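
/* Usage sketch (illustrative, mirrors the callers further down): the entry
 * returned by get_hashentry() is either an existing, initialised entry or a
 * freshly zeroed allocation, so callers tell the two apart by checking a
 * field that is only set once an entry has been initialised, e.g.:
 *
 *	zt = get_hashentry(&priv->ct_zone_table, &zone,
 *			   nfp_zone_table_params, sizeof(*zt));
 *	if (IS_ERR(zt) || zt->priv)
 *		return zt;
 *	... initialise zt and insert it into the table ...
 */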
55 
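/* A flow is treated as the pre_ct half of a conntrack offload when it is in
 * chain 0, does not match on any ct_state bits and carries a conntrack
 * action that is a plain ct or ct nat (no commit, clear, etc.).
 */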
56 bool is_pre_ct_flow(struct flow_cls_offload *flow)
57 {
58 	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
59 	struct flow_dissector *dissector = rule->match.dissector;
60 	struct flow_action_entry *act;
61 	struct flow_match_ct ct;
62 	int i;
63 
64 	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
65 		flow_rule_match_ct(rule, &ct);
66 		if (ct.key->ct_state)
67 			return false;
68 	}
69 
70 	if (flow->common.chain_index)
71 		return false;
72 
73 	flow_action_for_each(i, act, &flow->rule->action) {
74 		if (act->id == FLOW_ACTION_CT) {
75 			/* The pre_ct rule may only have the plain ct or ct nat action and
76 			 * cannot contain other ct actions, e.g. ct commit and so on.
77 			 */
78 			if (!act->ct.action || act->ct.action == TCA_CT_ACT_NAT)
79 				return true;
80 			else
81 				return false;
82 		}
83 	}
84 
85 	return false;
86 }
87 
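/* A flow is treated as the post_ct half when it matches the ct_state
 * established bit, or, for the nat case, when it has a non-zero chain index,
 * a ct clear action and no other conntrack actions.
 */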
88 bool is_post_ct_flow(struct flow_cls_offload *flow)
89 {
90 	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
91 	struct flow_dissector *dissector = rule->match.dissector;
92 	struct flow_action_entry *act;
93 	bool exist_ct_clear = false;
94 	struct flow_match_ct ct;
95 	int i;
96 
97 	if (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT)) {
98 		flow_rule_match_ct(rule, &ct);
99 		if (ct.key->ct_state & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED)
100 			return true;
101 	} else {
102 		/* post ct entry cannot contain any ct action except ct_clear. */
103 		flow_action_for_each(i, act, &flow->rule->action) {
104 			if (act->id == FLOW_ACTION_CT) {
105 				/* ignore ct clear action. */
106 				if (act->ct.action == TCA_CT_ACT_CLEAR) {
107 					exist_ct_clear = true;
108 					continue;
109 				}
110 
111 				return false;
112 			}
113 		}
114 		/* When NAT is done with ct, the post ct entry ignores the ct state
115 		 * and matches the NAT fields (sip/dip) instead. In this situation
116 		 * the flow chain index is non-zero and the flow contains a ct clear
117 		 * action.
		 */
118 		if (flow->common.chain_index && exist_ct_clear)
119 			return true;
120 	}
121 
122 	return false;
123 }
124 
125 /**
126  * get_mangled_key() - Mangle the key if mangle act exists
127  * @rule:	rule that carries the actions
128  * @buf:	pointer to key to be mangled
129  * @offset:	used to adjust mangled offset in L2/L3/L4 header
130  * @key_sz:	key size
131  * @htype:	mangling type
132  *
133  * Returns buf where the mangled key is stored.
134  */
135 static void *get_mangled_key(struct flow_rule *rule, void *buf,
136 			     u32 offset, size_t key_sz,
137 			     enum flow_action_mangle_base htype)
138 {
139 	struct flow_action_entry *act;
140 	u32 *val = (u32 *)buf;
141 	u32 off, msk, key;
142 	int i;
143 
144 	flow_action_for_each(i, act, &rule->action) {
145 		if (act->id == FLOW_ACTION_MANGLE &&
146 		    act->mangle.htype == htype) {
147 			off = act->mangle.offset - offset;
148 			msk = act->mangle.mask;
149 			key = act->mangle.val;
150 
151 			/* Mangling is supposed to be u32 aligned */
152 			if (off % 4 || off >= key_sz)
153 				continue;
154 
155 			val[off >> 2] &= msk;
156 			val[off >> 2] |= key;
157 		}
158 	}
159 
160 	return buf;
161 }
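
/* Worked example (illustrative only): if the rule carries a pedit of the IPv4
 * destination address, act->mangle.offset is offsetof(struct iphdr, daddr)
 * (16) and the caller passes offset = offsetof(struct iphdr, saddr) (12), so
 * off = 4 and the mangle is folded into the second word of the copied
 * struct flow_match_ipv4_addrs key (the dst field):
 *
 *	val[4 >> 2] &= act->mangle.mask;
 *	val[4 >> 2] |= act->mangle.val;
 */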
162 
163 /* Only tos and ttl are present in the flow_match_ip structure, which
164  * doesn't conform to the layout of the ip/ipv6 header definitions. So
165  * they need special handling here: fill them into the ip/ipv6
166  * header, so that mangling actions can work on them directly.
167  */
168 #define NFP_IPV4_TOS_MASK	GENMASK(23, 16)
169 #define NFP_IPV4_TTL_MASK	GENMASK(31, 24)
170 #define NFP_IPV6_TCLASS_MASK	GENMASK(27, 20)
171 #define NFP_IPV6_HLIMIT_MASK	GENMASK(7, 0)
172 static void *get_mangled_tos_ttl(struct flow_rule *rule, void *buf,
173 				 bool is_v6)
174 {
175 	struct flow_match_ip match;
176 	/* IPv4's ttl field is in the third dword. */
177 	__be32 ip_hdr[3];
178 	u32 tmp, hdr_len;
179 
180 	flow_rule_match_ip(rule, &match);
181 
182 	if (is_v6) {
183 		tmp = FIELD_PREP(NFP_IPV6_TCLASS_MASK, match.key->tos);
184 		ip_hdr[0] = cpu_to_be32(tmp);
185 		tmp = FIELD_PREP(NFP_IPV6_HLIMIT_MASK, match.key->ttl);
186 		ip_hdr[1] = cpu_to_be32(tmp);
187 		hdr_len = 2 * sizeof(__be32);
188 	} else {
189 		tmp = FIELD_PREP(NFP_IPV4_TOS_MASK, match.key->tos);
190 		ip_hdr[0] = cpu_to_be32(tmp);
191 		tmp = FIELD_PREP(NFP_IPV4_TTL_MASK, match.key->ttl);
192 		ip_hdr[2] = cpu_to_be32(tmp);
193 		hdr_len = 3 * sizeof(__be32);
194 	}
195 
196 	get_mangled_key(rule, ip_hdr, 0, hdr_len,
197 			is_v6 ? FLOW_ACT_MANGLE_HDR_TYPE_IP6 :
198 				FLOW_ACT_MANGLE_HDR_TYPE_IP4);
199 
200 	match.key = buf;
201 
202 	if (is_v6) {
203 		tmp = be32_to_cpu(ip_hdr[0]);
204 		match.key->tos = FIELD_GET(NFP_IPV6_TCLASS_MASK, tmp);
205 		tmp = be32_to_cpu(ip_hdr[1]);
206 		match.key->ttl = FIELD_GET(NFP_IPV6_HLIMIT_MASK, tmp);
207 	} else {
208 		tmp = be32_to_cpu(ip_hdr[0]);
209 		match.key->tos = FIELD_GET(NFP_IPV4_TOS_MASK, tmp);
210 		tmp = be32_to_cpu(ip_hdr[2]);
211 		match.key->ttl = FIELD_GET(NFP_IPV4_TTL_MASK, tmp);
212 	}
213 
214 	return buf;
215 }
216 
217 /* Note entry1 and entry2 are not swappable. Only skip the ip and
218  * tport merge checks for pre_ct and post_ct when pre_ct does nat.
219  */
220 static bool nfp_ct_merge_check_cannot_skip(struct nfp_fl_ct_flow_entry *entry1,
221 					   struct nfp_fl_ct_flow_entry *entry2)
222 {
223 	/* Only pre_ct can have the NFP_FL_ACTION_DO_NAT flag. */
224 	if ((entry1->flags & NFP_FL_ACTION_DO_NAT) &&
225 	    entry2->type == CT_TYPE_POST_CT)
226 		return false;
227 
228 	return true;
229 }
230 
231 /* Note entry1 and entry2 are not swappable: entry1 should be
232  * the former flow whose mangle actions, if any, need to be taken
233  * into account, and entry2 should be the latter flow whose actions
234  * we don't care about.
235  */
236 static int nfp_ct_merge_check(struct nfp_fl_ct_flow_entry *entry1,
237 			      struct nfp_fl_ct_flow_entry *entry2)
238 {
239 	unsigned int ovlp_keys = entry1->rule->match.dissector->used_keys &
240 				 entry2->rule->match.dissector->used_keys;
241 	bool out, is_v6 = false;
242 	u8 ip_proto = 0;
243 	/* Temporary buffer for mangled keys, 64 bytes is enough to cover the
244 	 * max struct size of the key for the various fields that may be mangled.
245 	 * Supported fields to mangle:
246 	 * mac_src/mac_dst(struct flow_match_eth_addrs, 12B)
247 	 * nw_tos/nw_ttl(struct flow_match_ip, 2B)
248 	 * nw_src/nw_dst(struct flow_match_ipv4/6_addrs, 32B)
249 	 * tp_src/tp_dst(struct flow_match_ports, 4B)
250 	 */
251 	char buf[64];
252 
253 	if (entry1->netdev && entry2->netdev &&
254 	    entry1->netdev != entry2->netdev)
255 		return -EINVAL;
256 
257 	/* Check the overlapped fields one by one, the unmasked part
258 	 * should not conflict with each other.
259 	 */
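	/* In each check below, COMPARE_UNMASKED_FIELDS (see conntrack.h) sets
	 * 'out' when the overlapping parts of the two matches conflict, in which
	 * case the flows cannot describe the same packet and the merge fails.
	 */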
260 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) {
261 		struct flow_match_control match1, match2;
262 
263 		flow_rule_match_control(entry1->rule, &match1);
264 		flow_rule_match_control(entry2->rule, &match2);
265 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
266 		if (out)
267 			goto check_failed;
268 	}
269 
270 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) {
271 		struct flow_match_basic match1, match2;
272 
273 		flow_rule_match_basic(entry1->rule, &match1);
274 		flow_rule_match_basic(entry2->rule, &match2);
275 
276 		/* The n_proto field is mandatory in ct-related flows,
277 		 * and should be either ipv4 or ipv6.
278 		 */
279 		is_v6 = match1.key->n_proto == htons(ETH_P_IPV6);
280 		/* The ip_proto field is required when the port fields are matched */
281 		ip_proto = match1.key->ip_proto;
282 
283 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
284 		if (out)
285 			goto check_failed;
286 	}
287 
288 	/* If the pre ct entry does nat, the nat ip exists in the nft entry
289 	 * and the merge check is done when the nft and post ct entries merge,
290 	 * so skip this ip merge check here.
291 	 */
292 	if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS)) &&
293 	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
294 		struct flow_match_ipv4_addrs match1, match2;
295 
296 		flow_rule_match_ipv4_addrs(entry1->rule, &match1);
297 		flow_rule_match_ipv4_addrs(entry2->rule, &match2);
298 
299 		memcpy(buf, match1.key, sizeof(*match1.key));
300 		match1.key = get_mangled_key(entry1->rule, buf,
301 					     offsetof(struct iphdr, saddr),
302 					     sizeof(*match1.key),
303 					     FLOW_ACT_MANGLE_HDR_TYPE_IP4);
304 
305 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
306 		if (out)
307 			goto check_failed;
308 	}
309 
310 	/* If the pre ct entry does nat, the nat ip exists in the nft entry
311 	 * and the merge check is done when the nft and post ct entries merge,
312 	 * so skip this ip merge check here.
313 	 */
314 	if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS)) &&
315 	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
316 		struct flow_match_ipv6_addrs match1, match2;
317 
318 		flow_rule_match_ipv6_addrs(entry1->rule, &match1);
319 		flow_rule_match_ipv6_addrs(entry2->rule, &match2);
320 
321 		memcpy(buf, match1.key, sizeof(*match1.key));
322 		match1.key = get_mangled_key(entry1->rule, buf,
323 					     offsetof(struct ipv6hdr, saddr),
324 					     sizeof(*match1.key),
325 					     FLOW_ACT_MANGLE_HDR_TYPE_IP6);
326 
327 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
328 		if (out)
329 			goto check_failed;
330 	}
331 
332 	/* If the pre ct entry does nat, the nat tport exists in the nft entry
333 	 * and the merge check is done when the nft and post ct entries merge,
334 	 * so skip this tport merge check here.
335 	 */
336 	if ((ovlp_keys & BIT(FLOW_DISSECTOR_KEY_PORTS)) &&
337 	    nfp_ct_merge_check_cannot_skip(entry1, entry2)) {
338 		enum flow_action_mangle_base htype = FLOW_ACT_MANGLE_UNSPEC;
339 		struct flow_match_ports match1, match2;
340 
341 		flow_rule_match_ports(entry1->rule, &match1);
342 		flow_rule_match_ports(entry2->rule, &match2);
343 
344 		if (ip_proto == IPPROTO_UDP)
345 			htype = FLOW_ACT_MANGLE_HDR_TYPE_UDP;
346 		else if (ip_proto == IPPROTO_TCP)
347 			htype = FLOW_ACT_MANGLE_HDR_TYPE_TCP;
348 
349 		memcpy(buf, match1.key, sizeof(*match1.key));
350 		match1.key = get_mangled_key(entry1->rule, buf, 0,
351 					     sizeof(*match1.key), htype);
352 
353 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
354 		if (out)
355 			goto check_failed;
356 	}
357 
358 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
359 		struct flow_match_eth_addrs match1, match2;
360 
361 		flow_rule_match_eth_addrs(entry1->rule, &match1);
362 		flow_rule_match_eth_addrs(entry2->rule, &match2);
363 
364 		memcpy(buf, match1.key, sizeof(*match1.key));
365 		match1.key = get_mangled_key(entry1->rule, buf, 0,
366 					     sizeof(*match1.key),
367 					     FLOW_ACT_MANGLE_HDR_TYPE_ETH);
368 
369 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
370 		if (out)
371 			goto check_failed;
372 	}
373 
374 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_VLAN)) {
375 		struct flow_match_vlan match1, match2;
376 
377 		flow_rule_match_vlan(entry1->rule, &match1);
378 		flow_rule_match_vlan(entry2->rule, &match2);
379 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
380 		if (out)
381 			goto check_failed;
382 	}
383 
384 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_MPLS)) {
385 		struct flow_match_mpls match1, match2;
386 
387 		flow_rule_match_mpls(entry1->rule, &match1);
388 		flow_rule_match_mpls(entry2->rule, &match2);
389 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
390 		if (out)
391 			goto check_failed;
392 	}
393 
394 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_TCP)) {
395 		struct flow_match_tcp match1, match2;
396 
397 		flow_rule_match_tcp(entry1->rule, &match1);
398 		flow_rule_match_tcp(entry2->rule, &match2);
399 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
400 		if (out)
401 			goto check_failed;
402 	}
403 
404 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_IP)) {
405 		struct flow_match_ip match1, match2;
406 
407 		flow_rule_match_ip(entry1->rule, &match1);
408 		flow_rule_match_ip(entry2->rule, &match2);
409 
410 		match1.key = get_mangled_tos_ttl(entry1->rule, buf, is_v6);
411 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
412 		if (out)
413 			goto check_failed;
414 	}
415 
416 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_KEYID)) {
417 		struct flow_match_enc_keyid match1, match2;
418 
419 		flow_rule_match_enc_keyid(entry1->rule, &match1);
420 		flow_rule_match_enc_keyid(entry2->rule, &match2);
421 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
422 		if (out)
423 			goto check_failed;
424 	}
425 
426 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
427 		struct flow_match_ipv4_addrs match1, match2;
428 
429 		flow_rule_match_enc_ipv4_addrs(entry1->rule, &match1);
430 		flow_rule_match_enc_ipv4_addrs(entry2->rule, &match2);
431 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
432 		if (out)
433 			goto check_failed;
434 	}
435 
436 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
437 		struct flow_match_ipv6_addrs match1, match2;
438 
439 		flow_rule_match_enc_ipv6_addrs(entry1->rule, &match1);
440 		flow_rule_match_enc_ipv6_addrs(entry2->rule, &match2);
441 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
442 		if (out)
443 			goto check_failed;
444 	}
445 
446 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
447 		struct flow_match_control match1, match2;
448 
449 		flow_rule_match_enc_control(entry1->rule, &match1);
450 		flow_rule_match_enc_control(entry2->rule, &match2);
451 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
452 		if (out)
453 			goto check_failed;
454 	}
455 
456 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_IP)) {
457 		struct flow_match_ip match1, match2;
458 
459 		flow_rule_match_enc_ip(entry1->rule, &match1);
460 		flow_rule_match_enc_ip(entry2->rule, &match2);
461 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
462 		if (out)
463 			goto check_failed;
464 	}
465 
466 	if (ovlp_keys & BIT(FLOW_DISSECTOR_KEY_ENC_OPTS)) {
467 		struct flow_match_enc_opts match1, match2;
468 
469 		flow_rule_match_enc_opts(entry1->rule, &match1);
470 		flow_rule_match_enc_opts(entry2->rule, &match2);
471 		COMPARE_UNMASKED_FIELDS(match1, match2, &out);
472 		if (out)
473 			goto check_failed;
474 	}
475 
476 	return 0;
477 
478 check_failed:
479 	return -EINVAL;
480 }
481 
482 static int nfp_ct_check_vlan_merge(struct flow_action_entry *a_in,
483 				   struct flow_rule *rule)
484 {
485 	struct flow_match_vlan match;
486 
487 	if (unlikely(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)))
488 		return -EOPNOTSUPP;
489 
490 	/* post_ct does not match the VLAN key, so it can be merged. */
491 	if (likely(!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)))
492 		return 0;
493 
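	/* The checks below use the masked-XOR idiom: (key & mask) ^ (act & mask)
	 * is non-zero only if the bits the post_ct match cares about differ from
	 * what the pre_ct vlan action would write, in which case merging would
	 * produce a flow that can never match.
	 */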
494 	switch (a_in->id) {
495 	/* pre_ct pops the vlan, so post_ct cannot match the VLAN key; cannot be merged. */
496 	case FLOW_ACTION_VLAN_POP:
497 		return -EOPNOTSUPP;
498 
499 	case FLOW_ACTION_VLAN_PUSH:
500 	case FLOW_ACTION_VLAN_MANGLE:
501 		flow_rule_match_vlan(rule, &match);
502 		/* different vlan id, cannot be merged. */
503 		if ((match.key->vlan_id & match.mask->vlan_id) ^
504 		    (a_in->vlan.vid & match.mask->vlan_id))
505 			return -EOPNOTSUPP;
506 
507 		/* different tpid, cannot be merged. */
508 		if ((match.key->vlan_tpid & match.mask->vlan_tpid) ^
509 		    (a_in->vlan.proto & match.mask->vlan_tpid))
510 			return -EOPNOTSUPP;
511 
512 		/* different priority, cannot be merged. */
513 		if ((match.key->vlan_priority & match.mask->vlan_priority) ^
514 		    (a_in->vlan.prio & match.mask->vlan_priority))
515 			return -EOPNOTSUPP;
516 
517 		break;
518 	default:
519 		return -EOPNOTSUPP;
520 	}
521 
522 	return 0;
523 }
524 
525 /* Extra check for merges across multiple ct zones.
526  * Currently supports merge checks of nft entries in different zones.
527  */
528 static int nfp_ct_merge_extra_check(struct nfp_fl_ct_flow_entry *nft_entry,
529 				    struct nfp_fl_ct_tc_merge *tc_m_entry)
530 {
531 	struct nfp_fl_nft_tc_merge *prev_nft_m_entry;
532 	struct nfp_fl_ct_flow_entry *pre_ct_entry;
533 
534 	pre_ct_entry = tc_m_entry->pre_ct_parent;
535 	prev_nft_m_entry = pre_ct_entry->prev_m_entries[pre_ct_entry->num_prev_m_entries - 1];
536 
537 	return nfp_ct_merge_check(prev_nft_m_entry->nft_parent, nft_entry);
538 }
539 
540 static int nfp_ct_merge_act_check(struct nfp_fl_ct_flow_entry *pre_ct_entry,
541 				  struct nfp_fl_ct_flow_entry *post_ct_entry,
542 				  struct nfp_fl_ct_flow_entry *nft_entry)
543 {
544 	struct flow_action_entry *act;
545 	int i, err;
546 
547 	/* Check for pre_ct->action conflicts */
548 	flow_action_for_each(i, act, &pre_ct_entry->rule->action) {
549 		switch (act->id) {
550 		case FLOW_ACTION_VLAN_PUSH:
551 		case FLOW_ACTION_VLAN_POP:
552 		case FLOW_ACTION_VLAN_MANGLE:
553 			err = nfp_ct_check_vlan_merge(act, post_ct_entry->rule);
554 			if (err)
555 				return err;
556 			break;
557 		case FLOW_ACTION_MPLS_PUSH:
558 		case FLOW_ACTION_MPLS_POP:
559 		case FLOW_ACTION_MPLS_MANGLE:
560 			return -EOPNOTSUPP;
561 		default:
562 			break;
563 		}
564 	}
565 
566 	/* Check for nft->action conflicts */
567 	flow_action_for_each(i, act, &nft_entry->rule->action) {
568 		switch (act->id) {
569 		case FLOW_ACTION_VLAN_PUSH:
570 		case FLOW_ACTION_VLAN_POP:
571 		case FLOW_ACTION_VLAN_MANGLE:
572 		case FLOW_ACTION_MPLS_PUSH:
573 		case FLOW_ACTION_MPLS_POP:
574 		case FLOW_ACTION_MPLS_MANGLE:
575 			return -EOPNOTSUPP;
576 		default:
577 			break;
578 		}
579 	}
580 	return 0;
581 }
582 
583 static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry,
584 			     struct nfp_fl_ct_flow_entry *nft_entry)
585 {
586 	struct flow_dissector *dissector = post_ct_entry->rule->match.dissector;
587 	struct flow_action_entry *ct_met;
588 	struct flow_match_ct ct;
589 	int i;
590 
591 	ct_met = get_flow_act(nft_entry->rule, FLOW_ACTION_CT_METADATA);
592 	if (ct_met && (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CT))) {
593 		u32 *act_lbl;
594 
595 		act_lbl = ct_met->ct_metadata.labels;
596 		flow_rule_match_ct(post_ct_entry->rule, &ct);
597 		for (i = 0; i < 4; i++) {
598 			if ((ct.key->ct_labels[i] & ct.mask->ct_labels[i]) ^
599 			    (act_lbl[i] & ct.mask->ct_labels[i]))
600 				return -EINVAL;
601 		}
602 
603 		if ((ct.key->ct_mark & ct.mask->ct_mark) ^
604 		    (ct_met->ct_metadata.mark & ct.mask->ct_mark))
605 			return -EINVAL;
606 
607 		return 0;
608 	} else {
609 		/* post_ct with ct clear action will not match the
610 		 * ct status when nft is nat entry.
611 		 */
612 		if (nft_entry->flags & NFP_FL_ACTION_DO_MANGLE)
613 			return 0;
614 	}
615 
616 	return -EINVAL;
617 }
618 
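/* Walk the key layer flags and record, for each layer that is present, its
 * byte offset within the final key blob in 'map', returning the total key
 * size needed for the offload.
 */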
619 static int
620 nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
621 {
622 	int key_size;
623 
624 	/* This field must always be present */
625 	key_size = sizeof(struct nfp_flower_meta_tci);
626 	map[FLOW_PAY_META_TCI] = 0;
627 
628 	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) {
629 		map[FLOW_PAY_EXT_META] = key_size;
630 		key_size += sizeof(struct nfp_flower_ext_meta);
631 	}
632 	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) {
633 		map[FLOW_PAY_INPORT] = key_size;
634 		key_size += sizeof(struct nfp_flower_in_port);
635 	}
636 	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) {
637 		map[FLOW_PAY_MAC_MPLS] = key_size;
638 		key_size += sizeof(struct nfp_flower_mac_mpls);
639 	}
640 	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) {
641 		map[FLOW_PAY_L4] = key_size;
642 		key_size += sizeof(struct nfp_flower_tp_ports);
643 	}
644 	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) {
645 		map[FLOW_PAY_IPV4] = key_size;
646 		key_size += sizeof(struct nfp_flower_ipv4);
647 	}
648 	if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) {
649 		map[FLOW_PAY_IPV6] = key_size;
650 		key_size += sizeof(struct nfp_flower_ipv6);
651 	}
652 
653 	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
654 		map[FLOW_PAY_QINQ] = key_size;
655 		key_size += sizeof(struct nfp_flower_vlan);
656 	}
657 
658 	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
659 		map[FLOW_PAY_GRE] = key_size;
660 		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
661 			key_size += sizeof(struct nfp_flower_ipv6_gre_tun);
662 		else
663 			key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
664 	}
665 
666 	if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
667 	    (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
668 		map[FLOW_PAY_UDP_TUN] = key_size;
669 		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
670 			key_size += sizeof(struct nfp_flower_ipv6_udp_tun);
671 		else
672 			key_size += sizeof(struct nfp_flower_ipv4_udp_tun);
673 	}
674 
675 	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
676 		map[FLOW_PAY_GENEVE_OPT] = key_size;
677 		key_size += sizeof(struct nfp_flower_geneve_options);
678 	}
679 
680 	return key_size;
681 }
682 
683 /* Get the csum flags according to the ip proto and mangle action. */
684 static void nfp_fl_get_csum_flag(struct flow_action_entry *a_in, u8 ip_proto, u32 *csum)
685 {
686 	if (a_in->id != FLOW_ACTION_MANGLE)
687 		return;
688 
689 	switch (a_in->mangle.htype) {
690 	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
691 		*csum |= TCA_CSUM_UPDATE_FLAG_IPV4HDR;
692 		if (ip_proto == IPPROTO_TCP)
693 			*csum |= TCA_CSUM_UPDATE_FLAG_TCP;
694 		else if (ip_proto == IPPROTO_UDP)
695 			*csum |= TCA_CSUM_UPDATE_FLAG_UDP;
696 		break;
697 	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
698 		*csum |= TCA_CSUM_UPDATE_FLAG_TCP;
699 		break;
700 	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
701 		*csum |= TCA_CSUM_UPDATE_FLAG_UDP;
702 		break;
703 	default:
704 		break;
705 	}
706 }
707 
708 static int nfp_fl_merge_actions_offload(struct flow_rule **rules,
709 					struct nfp_flower_priv *priv,
710 					struct net_device *netdev,
711 					struct nfp_fl_payload *flow_pay,
712 					int num_rules)
713 {
714 	enum flow_action_hw_stats tmp_stats = FLOW_ACTION_HW_STATS_DONT_CARE;
715 	struct flow_action_entry *a_in;
716 	int i, j, id, num_actions = 0;
717 	struct flow_rule *a_rule;
718 	int err = 0, offset = 0;
719 
720 	for (i = 0; i < num_rules; i++)
721 		num_actions += rules[i]->action.num_entries;
722 
723 	/* Reserve one extra action slot per nft rule (num_rules / 2) so there is
724 	 * enough room to add a checksum action when doing nat.
725 	 */
726 	a_rule = flow_rule_alloc(num_actions + (num_rules / 2));
727 	if (!a_rule)
728 		return -ENOMEM;
729 
730 	/* The post_ct entry has at least one action. */
731 	if (rules[num_rules - 1]->action.num_entries != 0)
732 		tmp_stats = rules[num_rules - 1]->action.entries[0].hw_stats;
733 
734 	/* Actions need a BASIC dissector. */
735 	a_rule->match = rules[0]->match;
736 
737 	/* Copy actions */
738 	for (j = 0; j < num_rules; j++) {
739 		u32 csum_updated = 0;
740 		u8 ip_proto = 0;
741 
742 		if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) {
743 			struct flow_match_basic match;
744 
745 			/* ip_proto is the only field that is needed in the later compile_action,
746 			 * where it is used to set the correct checksum flags. It doesn't really
747 			 * matter which input rule's ip_proto field we take, as the earlier merge
748 			 * checks have already made sure that they don't conflict. We do not know
749 			 * which of the subflows has the ip_proto filled in, so we need to iterate
750 			 * through the subflows and assign the proper subflow's match to a_rule.
751 			 */
752 			flow_rule_match_basic(rules[j], &match);
753 			if (match.mask->ip_proto) {
754 				a_rule->match = rules[j]->match;
755 				ip_proto = match.key->ip_proto;
756 			}
757 		}
758 
759 		for (i = 0; i < rules[j]->action.num_entries; i++) {
760 			a_in = &rules[j]->action.entries[i];
761 			id = a_in->id;
762 
763 			/* Ignore CT related actions as these would already have
764 			 * been taken care of by previous checks, and we do not send
765 			 * any CT actions to the firmware.
766 			 */
767 			switch (id) {
768 			case FLOW_ACTION_CT:
769 			case FLOW_ACTION_GOTO:
770 			case FLOW_ACTION_CT_METADATA:
771 				continue;
772 			default:
773 				/* The nft entry is generated by tc ct, and its mangle actions do not
774 				 * care about stats, so inherit the post ct entry stats to satisfy the
775 				 * flow_action_hw_stats_check.
776 				 * nft entry flow rules are at odd array indexes.
777 				 */
778 				if (j & 0x01) {
779 					if (a_in->hw_stats == FLOW_ACTION_HW_STATS_DONT_CARE)
780 						a_in->hw_stats = tmp_stats;
781 					nfp_fl_get_csum_flag(a_in, ip_proto, &csum_updated);
782 				}
783 				memcpy(&a_rule->action.entries[offset++],
784 				       a_in, sizeof(struct flow_action_entry));
785 				break;
786 			}
787 		}
788 		/* The nft entry has a mangle action but no checksum action when doing NAT.
789 		 * Hardware will automatically fix the IPv4 and TCP/UDP checksums, so add a
790 		 * csum action to satisfy the csum action check.
791 		 */
792 		if (csum_updated) {
793 			struct flow_action_entry *csum_action;
794 
795 			csum_action = &a_rule->action.entries[offset++];
796 			csum_action->id = FLOW_ACTION_CSUM;
797 			csum_action->csum_flags = csum_updated;
798 			csum_action->hw_stats = tmp_stats;
799 		}
800 	}
801 
802 	/* Some actions would have been ignored, so update the num_entries field */
803 	a_rule->action.num_entries = offset;
804 	err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL);
805 	kfree(a_rule);
806 
807 	return err;
808 }
809 
810 static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
811 {
812 	enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
813 	struct nfp_fl_ct_zone_entry *zt = m_entry->zt;
814 	struct flow_rule *rules[NFP_MAX_ENTRY_RULES];
815 	struct nfp_fl_ct_flow_entry *pre_ct_entry;
816 	struct nfp_fl_key_ls key_layer, tmp_layer;
817 	struct nfp_flower_priv *priv = zt->priv;
818 	u16 key_map[_FLOW_PAY_LAYERS_MAX];
819 	struct nfp_fl_payload *flow_pay;
820 	u8 *key, *msk, *kdata, *mdata;
821 	struct nfp_port *port = NULL;
822 	int num_rules, err, i, j = 0;
823 	struct net_device *netdev;
824 	bool qinq_sup;
825 	u32 port_id;
826 	u16 offset;
827 
828 	netdev = m_entry->netdev;
829 	qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
830 
831 	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
832 	num_rules = pre_ct_entry->num_prev_m_entries * 2 + _CT_TYPE_MAX;
833 
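	/* Collect the flow rules in merge order: a pre_ct/nft pair for every
	 * previous merge stage, then the current pre_ct, nft and post_ct rules.
	 * This places every nft rule at an odd array index, which
	 * nfp_fl_merge_actions_offload() relies on.
	 */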
834 	for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++) {
835 		rules[j++] = pre_ct_entry->prev_m_entries[i]->tc_m_parent->pre_ct_parent->rule;
836 		rules[j++] = pre_ct_entry->prev_m_entries[i]->nft_parent->rule;
837 	}
838 
839 	rules[j++] = m_entry->tc_m_parent->pre_ct_parent->rule;
840 	rules[j++] = m_entry->nft_parent->rule;
841 	rules[j++] = m_entry->tc_m_parent->post_ct_parent->rule;
842 
843 	memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls));
844 	memset(&key_map, 0, sizeof(key_map));
845 
846 	/* Calculate the resultant key layer and size for offload */
847 	for (i = 0; i < num_rules; i++) {
848 		err = nfp_flower_calculate_key_layers(priv->app,
849 						      m_entry->netdev,
850 						      &tmp_layer, rules[i],
851 						      &tun_type, NULL);
852 		if (err)
853 			return err;
854 
855 		key_layer.key_layer |= tmp_layer.key_layer;
856 		key_layer.key_layer_two |= tmp_layer.key_layer_two;
857 	}
858 	key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map);
859 
860 	flow_pay = nfp_flower_allocate_new(&key_layer);
861 	if (!flow_pay)
862 		return -ENOMEM;
863 
864 	memset(flow_pay->unmasked_data, 0, key_layer.key_size);
865 	memset(flow_pay->mask_data, 0, key_layer.key_size);
866 
867 	kdata = flow_pay->unmasked_data;
868 	mdata = flow_pay->mask_data;
869 
870 	offset = key_map[FLOW_PAY_META_TCI];
871 	key = kdata + offset;
872 	msk = mdata + offset;
873 	nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key,
874 				(struct nfp_flower_meta_tci *)msk,
875 				key_layer.key_layer);
876 
877 	if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) {
878 		offset = key_map[FLOW_PAY_EXT_META];
879 		key = kdata + offset;
880 		msk = mdata + offset;
881 		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key,
882 					    key_layer.key_layer_two);
883 		nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk,
884 					    key_layer.key_layer_two);
885 	}
886 
887 	/* Use the in_port from the -trk rule. The tc merge checks should already
888 	 * ensure that the ingress netdevs are the same.
889 	 */
890 	port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev);
891 	offset = key_map[FLOW_PAY_INPORT];
892 	key = kdata + offset;
893 	msk = mdata + offset;
894 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)key,
895 				      port_id, false, tun_type, NULL);
896 	if (err)
897 		goto ct_offload_err;
898 	err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk,
899 				      port_id, true, tun_type, NULL);
900 	if (err)
901 		goto ct_offload_err;
902 
903 	/* The following part works on the assumption that previous checks have
904 	 * already filtered out flows that have different values for the same
905 	 * layers. Here we iterate through all the rules and merge their respective
906 	 * masked values (cared bits); the basic method is:
907 	 * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask)
908 	 * final_mask = r1_mask | r2_mask | r3_mask
909 	 * If none of the rules contains a match that is also fine; it simply means
910 	 * that the layer is not present.
911 	 */
912 	if (!qinq_sup) {
913 		for (i = 0; i < num_rules; i++) {
914 			offset = key_map[FLOW_PAY_META_TCI];
915 			key = kdata + offset;
916 			msk = mdata + offset;
917 			nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key,
918 					       (struct nfp_flower_meta_tci *)msk,
919 					       rules[i]);
920 		}
921 	}
922 
923 	if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) {
924 		offset = key_map[FLOW_PAY_MAC_MPLS];
925 		key = kdata + offset;
926 		msk = mdata + offset;
927 		for (i = 0; i < num_rules; i++) {
928 			nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key,
929 					       (struct nfp_flower_mac_mpls *)msk,
930 					       rules[i]);
931 			err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key,
932 						      (struct nfp_flower_mac_mpls *)msk,
933 						      rules[i], NULL);
934 			if (err)
935 				goto ct_offload_err;
936 		}
937 	}
938 
939 	if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) {
940 		offset = key_map[FLOW_PAY_IPV4];
941 		key = kdata + offset;
942 		msk = mdata + offset;
943 		for (i = 0; i < num_rules; i++) {
944 			nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key,
945 						(struct nfp_flower_ipv4 *)msk,
946 						rules[i]);
947 		}
948 	}
949 
950 	if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) {
951 		offset = key_map[FLOW_PAY_IPV6];
952 		key = kdata + offset;
953 		msk = mdata + offset;
954 		for (i = 0; i < num_rules; i++) {
955 			nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key,
956 						(struct nfp_flower_ipv6 *)msk,
957 						rules[i]);
958 		}
959 	}
960 
961 	if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) {
962 		offset = key_map[FLOW_PAY_L4];
963 		key = kdata + offset;
964 		msk = mdata + offset;
965 		for (i = 0; i < num_rules; i++) {
966 			nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key,
967 						 (struct nfp_flower_tp_ports *)msk,
968 						 rules[i]);
969 		}
970 	}
971 
972 	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
973 		offset = key_map[FLOW_PAY_QINQ];
974 		key = kdata + offset;
975 		msk = mdata + offset;
976 		for (i = 0; i < num_rules; i++) {
977 			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
978 						(struct nfp_flower_vlan *)msk,
979 						rules[i]);
980 		}
981 	}
982 
983 	if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
984 		offset = key_map[FLOW_PAY_GRE];
985 		key = kdata + offset;
986 		msk = mdata + offset;
987 		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
988 			struct nfp_flower_ipv6_gre_tun *gre_match;
989 			struct nfp_ipv6_addr_entry *entry;
990 			struct in6_addr *dst;
991 
992 			for (i = 0; i < num_rules; i++) {
993 				nfp_flower_compile_ipv6_gre_tun((void *)key,
994 								(void *)msk, rules[i]);
995 			}
996 			gre_match = (struct nfp_flower_ipv6_gre_tun *)key;
997 			dst = &gre_match->ipv6.dst;
998 
999 			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
1000 			if (!entry) {
1001 				err = -ENOMEM;
1002 				goto ct_offload_err;
1003 			}
1004 
1005 			flow_pay->nfp_tun_ipv6 = entry;
1006 		} else {
1007 			__be32 dst;
1008 
1009 			for (i = 0; i < num_rules; i++) {
1010 				nfp_flower_compile_ipv4_gre_tun((void *)key,
1011 								(void *)msk, rules[i]);
1012 			}
1013 			dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst;
1014 
1015 			/* Store the tunnel destination in the rule data.
1016 			 * This must be present and be an exact match.
1017 			 */
1018 			flow_pay->nfp_tun_ipv4_addr = dst;
1019 			nfp_tunnel_add_ipv4_off(priv->app, dst);
1020 		}
1021 	}
1022 
1023 	if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
1024 	    key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
1025 		offset = key_map[FLOW_PAY_UDP_TUN];
1026 		key = kdata + offset;
1027 		msk = mdata + offset;
1028 		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
1029 			struct nfp_flower_ipv6_udp_tun *udp_match;
1030 			struct nfp_ipv6_addr_entry *entry;
1031 			struct in6_addr *dst;
1032 
1033 			for (i = 0; i < num_rules; i++) {
1034 				nfp_flower_compile_ipv6_udp_tun((void *)key,
1035 								(void *)msk, rules[i]);
1036 			}
1037 			udp_match = (struct nfp_flower_ipv6_udp_tun *)key;
1038 			dst = &udp_match->ipv6.dst;
1039 
1040 			entry = nfp_tunnel_add_ipv6_off(priv->app, dst);
1041 			if (!entry) {
1042 				err = -ENOMEM;
1043 				goto ct_offload_err;
1044 			}
1045 
1046 			flow_pay->nfp_tun_ipv6 = entry;
1047 		} else {
1048 			__be32 dst;
1049 
1050 			for (i = 0; i < num_rules; i++) {
1051 				nfp_flower_compile_ipv4_udp_tun((void *)key,
1052 								(void *)msk, rules[i]);
1053 			}
1054 			dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst;
1055 
1056 			/* Store the tunnel destination in the rule data.
1057 			 * This must be present and be an exact match.
1058 			 */
1059 			flow_pay->nfp_tun_ipv4_addr = dst;
1060 			nfp_tunnel_add_ipv4_off(priv->app, dst);
1061 		}
1062 
1063 		if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) {
1064 			offset = key_map[FLOW_PAY_GENEVE_OPT];
1065 			key = kdata + offset;
1066 			msk = mdata + offset;
1067 			for (i = 0; i < num_rules; i++)
1068 				nfp_flower_compile_geneve_opt(key, msk, rules[i]);
1069 		}
1070 	}
1071 
1072 	/* Merge actions into flow_pay */
1073 	err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay, num_rules);
1074 	if (err)
1075 		goto ct_offload_err;
1076 
1077 	/* Use the pointer address as the cookie, but set the last bit to 1.
1078 	 * This prevents the 'is_merge_flow' check from detecting this as
1079 	 * an already merged flow. This works since pointer alignment means
1080 	 * that the last bit of a pointer address will be 0.
1081 	 */
1082 	flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1;
1083 	err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie,
1084 					flow_pay, netdev, NULL);
1085 	if (err)
1086 		goto ct_offload_err;
1087 
1088 	if (nfp_netdev_is_nfp_repr(netdev))
1089 		port = nfp_port_from_netdev(netdev);
1090 
1091 	err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
1092 				     nfp_flower_table_params);
1093 	if (err)
1094 		goto ct_release_offload_meta_err;
1095 
1096 	err = nfp_flower_xmit_flow(priv->app, flow_pay,
1097 				   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
1098 	if (err)
1099 		goto ct_remove_rhash_err;
1100 
1101 	m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie;
1102 	m_entry->flow_pay = flow_pay;
1103 
1104 	if (port)
1105 		port->tc_offload_cnt++;
1106 
1107 	return err;
1108 
1109 ct_remove_rhash_err:
1110 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1111 					    &flow_pay->fl_node,
1112 					    nfp_flower_table_params));
1113 ct_release_offload_meta_err:
1114 	nfp_modify_flow_metadata(priv->app, flow_pay);
1115 ct_offload_err:
1116 	if (flow_pay->nfp_tun_ipv4_addr)
1117 		nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr);
1118 	if (flow_pay->nfp_tun_ipv6)
1119 		nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6);
1120 	kfree(flow_pay->action_data);
1121 	kfree(flow_pay->mask_data);
1122 	kfree(flow_pay->unmasked_data);
1123 	kfree(flow_pay);
1124 	return err;
1125 }
1126 
1127 static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie,
1128 				 struct net_device *netdev)
1129 {
1130 	struct nfp_flower_priv *priv = app->priv;
1131 	struct nfp_fl_payload *flow_pay;
1132 	struct nfp_port *port = NULL;
1133 	int err = 0;
1134 
1135 	if (nfp_netdev_is_nfp_repr(netdev))
1136 		port = nfp_port_from_netdev(netdev);
1137 
1138 	flow_pay = nfp_flower_search_fl_table(app, cookie, netdev);
1139 	if (!flow_pay)
1140 		return -ENOENT;
1141 
1142 	err = nfp_modify_flow_metadata(app, flow_pay);
1143 	if (err)
1144 		goto err_free_merge_flow;
1145 
1146 	if (flow_pay->nfp_tun_ipv4_addr)
1147 		nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr);
1148 
1149 	if (flow_pay->nfp_tun_ipv6)
1150 		nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6);
1151 
1152 	if (!flow_pay->in_hw) {
1153 		err = 0;
1154 		goto err_free_merge_flow;
1155 	}
1156 
1157 	err = nfp_flower_xmit_flow(app, flow_pay,
1158 				   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
1159 
1160 err_free_merge_flow:
1161 	nfp_flower_del_linked_merge_flows(app, flow_pay);
1162 	if (port)
1163 		port->tc_offload_cnt--;
1164 	kfree(flow_pay->action_data);
1165 	kfree(flow_pay->mask_data);
1166 	kfree(flow_pay->unmasked_data);
1167 	WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
1168 					    &flow_pay->fl_node,
1169 					    nfp_flower_table_params));
1170 	kfree_rcu(flow_pay, rcu);
1171 	return err;
1172 }
1173 
1174 static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt,
1175 			       struct nfp_fl_ct_flow_entry *nft_entry,
1176 			       struct nfp_fl_ct_tc_merge *tc_m_entry)
1177 {
1178 	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
1179 	struct nfp_fl_nft_tc_merge *nft_m_entry;
1180 	unsigned long new_cookie[3];
1181 	int err;
1182 
1183 	pre_ct_entry = tc_m_entry->pre_ct_parent;
1184 	post_ct_entry = tc_m_entry->post_ct_parent;
1185 
1186 	err = nfp_ct_merge_act_check(pre_ct_entry, post_ct_entry, nft_entry);
1187 	if (err)
1188 		return err;
1189 
1190 	/* Check that the two tc flows are also compatible with
1191 	 * the nft entry. No need to check the pre_ct and post_ct
1192 	 * entries as that was already done during pre_merge.
1193 	 * The nft entry does not have a chain populated, so
1194 	 * skip this check.
1195 	 */
1196 	err = nfp_ct_merge_check(pre_ct_entry, nft_entry);
1197 	if (err)
1198 		return err;
1199 	err = nfp_ct_merge_check(nft_entry, post_ct_entry);
1200 	if (err)
1201 		return err;
1202 	err = nfp_ct_check_meta(post_ct_entry, nft_entry);
1203 	if (err)
1204 		return err;
1205 
1206 	if (pre_ct_entry->num_prev_m_entries > 0) {
1207 		err = nfp_ct_merge_extra_check(nft_entry, tc_m_entry);
1208 		if (err)
1209 			return err;
1210 	}
1211 
1212 	/* Combine the tc_merge and nft cookies to form this entry's cookie. */
1213 	new_cookie[0] = tc_m_entry->cookie[0];
1214 	new_cookie[1] = tc_m_entry->cookie[1];
1215 	new_cookie[2] = nft_entry->cookie;
1216 	nft_m_entry = get_hashentry(&zt->nft_merge_tb,
1217 				    &new_cookie,
1218 				    nfp_nft_ct_merge_params,
1219 				    sizeof(*nft_m_entry));
1220 
1221 	if (IS_ERR(nft_m_entry))
1222 		return PTR_ERR(nft_m_entry);
1223 
1224 	/* nft_m_entry already present, not merging again */
1225 	if (!memcmp(&new_cookie, nft_m_entry->cookie, sizeof(new_cookie)))
1226 		return 0;
1227 
1228 	memcpy(&nft_m_entry->cookie, &new_cookie, sizeof(new_cookie));
1229 	nft_m_entry->zt = zt;
1230 	nft_m_entry->tc_m_parent = tc_m_entry;
1231 	nft_m_entry->nft_parent = nft_entry;
1232 	nft_m_entry->tc_flower_cookie = 0;
1233 	/* Copy the netdev from the pre_ct entry. When the tc_m_entry was created
1234 	 * it only combined them if the netdevs were the same, so can use any of them.
1235 	 */
1236 	nft_m_entry->netdev = pre_ct_entry->netdev;
1237 
1238 	/* Add this entry to the tc_m_list and nft_flow lists */
1239 	list_add(&nft_m_entry->tc_merge_list, &tc_m_entry->children);
1240 	list_add(&nft_m_entry->nft_flow_list, &nft_entry->children);
1241 
1242 	err = rhashtable_insert_fast(&zt->nft_merge_tb, &nft_m_entry->hash_node,
1243 				     nfp_nft_ct_merge_params);
1244 	if (err)
1245 		goto err_nft_ct_merge_insert;
1246 
1247 	zt->nft_merge_count++;
1248 
1249 	if (post_ct_entry->goto_chain_index > 0)
1250 		return nfp_fl_create_new_pre_ct(nft_m_entry);
1251 
1252 	/* Generate offload structure and send to nfp */
1253 	err = nfp_fl_ct_add_offload(nft_m_entry);
1254 	if (err)
1255 		goto err_nft_ct_offload;
1256 
1257 	return err;
1258 
1259 err_nft_ct_offload:
1260 	nfp_fl_ct_del_offload(zt->priv->app, nft_m_entry->tc_flower_cookie,
1261 			      nft_m_entry->netdev);
1262 err_nft_ct_merge_insert:
1263 	list_del(&nft_m_entry->tc_merge_list);
1264 	list_del(&nft_m_entry->nft_flow_list);
1265 	kfree(nft_m_entry);
1266 	return err;
1267 }
1268 
1269 static int nfp_ct_do_tc_merge(struct nfp_fl_ct_zone_entry *zt,
1270 			      struct nfp_fl_ct_flow_entry *ct_entry1,
1271 			      struct nfp_fl_ct_flow_entry *ct_entry2)
1272 {
1273 	struct nfp_fl_ct_flow_entry *post_ct_entry, *pre_ct_entry;
1274 	struct nfp_fl_ct_flow_entry *nft_entry, *nft_tmp;
1275 	struct nfp_fl_ct_tc_merge *m_entry;
1276 	unsigned long new_cookie[2];
1277 	int err;
1278 
1279 	if (ct_entry1->type == CT_TYPE_PRE_CT) {
1280 		pre_ct_entry = ct_entry1;
1281 		post_ct_entry = ct_entry2;
1282 	} else {
1283 		post_ct_entry = ct_entry1;
1284 		pre_ct_entry = ct_entry2;
1285 	}
1286 
1287 	/* Checks that the chain_index of the filter matches the
1288 	 * chain_index of the GOTO action.
1289 	 */
1290 	if (post_ct_entry->chain_index != pre_ct_entry->goto_chain_index)
1291 		return -EINVAL;
1292 
1293 	err = nfp_ct_merge_check(pre_ct_entry, post_ct_entry);
1294 	if (err)
1295 		return err;
1296 
1297 	new_cookie[0] = pre_ct_entry->cookie;
1298 	new_cookie[1] = post_ct_entry->cookie;
1299 	m_entry = get_hashentry(&zt->tc_merge_tb, &new_cookie,
1300 				nfp_tc_ct_merge_params, sizeof(*m_entry));
1301 	if (IS_ERR(m_entry))
1302 		return PTR_ERR(m_entry);
1303 
1304 	/* m_entry already present, not merging again */
1305 	if (!memcmp(&new_cookie, m_entry->cookie, sizeof(new_cookie)))
1306 		return 0;
1307 
1308 	memcpy(&m_entry->cookie, &new_cookie, sizeof(new_cookie));
1309 	m_entry->zt = zt;
1310 	m_entry->post_ct_parent = post_ct_entry;
1311 	m_entry->pre_ct_parent = pre_ct_entry;
1312 
1313 	/* Add this entry to the pre_ct and post_ct lists */
1314 	list_add(&m_entry->post_ct_list, &post_ct_entry->children);
1315 	list_add(&m_entry->pre_ct_list, &pre_ct_entry->children);
1316 	INIT_LIST_HEAD(&m_entry->children);
1317 
1318 	err = rhashtable_insert_fast(&zt->tc_merge_tb, &m_entry->hash_node,
1319 				     nfp_tc_ct_merge_params);
1320 	if (err)
1321 		goto err_ct_tc_merge_insert;
1322 	zt->tc_merge_count++;
1323 
1324 	/* Merge with existing nft flows */
1325 	list_for_each_entry_safe(nft_entry, nft_tmp, &zt->nft_flows_list,
1326 				 list_node) {
1327 		nfp_ct_do_nft_merge(zt, nft_entry, m_entry);
1328 	}
1329 
1330 	return 0;
1331 
1332 err_ct_tc_merge_insert:
1333 	list_del(&m_entry->post_ct_list);
1334 	list_del(&m_entry->pre_ct_list);
1335 	kfree(m_entry);
1336 	return err;
1337 }
1338 
1339 static struct
1340 nfp_fl_ct_zone_entry *get_nfp_zone_entry(struct nfp_flower_priv *priv,
1341 					 u16 zone, bool wildcarded)
1342 {
1343 	struct nfp_fl_ct_zone_entry *zt;
1344 	int err;
1345 
1346 	if (wildcarded && priv->ct_zone_wc)
1347 		return priv->ct_zone_wc;
1348 
1349 	if (!wildcarded) {
1350 		zt = get_hashentry(&priv->ct_zone_table, &zone,
1351 				   nfp_zone_table_params, sizeof(*zt));
1352 
1353 		/* If priv is set this is an existing entry, just return it */
1354 		if (IS_ERR(zt) || zt->priv)
1355 			return zt;
1356 	} else {
1357 		zt = kzalloc(sizeof(*zt), GFP_KERNEL);
1358 		if (!zt)
1359 			return ERR_PTR(-ENOMEM);
1360 	}
1361 
1362 	zt->zone = zone;
1363 	zt->priv = priv;
1364 	zt->nft = NULL;
1365 
1366 	/* init the various hash tables and lists */
1367 	INIT_LIST_HEAD(&zt->pre_ct_list);
1368 	INIT_LIST_HEAD(&zt->post_ct_list);
1369 	INIT_LIST_HEAD(&zt->nft_flows_list);
1370 
1371 	err = rhashtable_init(&zt->tc_merge_tb, &nfp_tc_ct_merge_params);
1372 	if (err)
1373 		goto err_tc_merge_tb_init;
1374 
1375 	err = rhashtable_init(&zt->nft_merge_tb, &nfp_nft_ct_merge_params);
1376 	if (err)
1377 		goto err_nft_merge_tb_init;
1378 
1379 	if (wildcarded) {
1380 		priv->ct_zone_wc = zt;
1381 	} else {
1382 		err = rhashtable_insert_fast(&priv->ct_zone_table,
1383 					     &zt->hash_node,
1384 					     nfp_zone_table_params);
1385 		if (err)
1386 			goto err_zone_insert;
1387 	}
1388 
1389 	return zt;
1390 
1391 err_zone_insert:
1392 	rhashtable_destroy(&zt->nft_merge_tb);
1393 err_nft_merge_tb_init:
1394 	rhashtable_destroy(&zt->tc_merge_tb);
1395 err_tc_merge_tb_init:
1396 	kfree(zt);
1397 	return ERR_PTR(err);
1398 }
1399 
1400 static struct net_device *get_netdev_from_rule(struct flow_rule *rule)
1401 {
1402 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
1403 		struct flow_match_meta match;
1404 
1405 		flow_rule_match_meta(rule, &match);
1406 		if (match.key->ingress_ifindex & match.mask->ingress_ifindex)
1407 			return __dev_get_by_index(&init_net,
1408 						  match.key->ingress_ifindex);
1409 	}
1410 
1411 	return NULL;
1412 }
1413 
1414 static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_action)
1415 {
1416 	if (mangle_action->id != FLOW_ACTION_MANGLE)
1417 		return;
1418 
1419 	switch (mangle_action->mangle.htype) {
1420 	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
1421 	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
1422 		mangle_action->mangle.val = (__force u32)cpu_to_be32(mangle_action->mangle.val);
1423 		mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask);
1424 		return;
1425 
1426 	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
1427 	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
1428 		mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val);
1429 		mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask);
1430 		return;
1431 
1432 	default:
1433 		return;
1434 	}
1435 }
1436 
1437 static int nfp_nft_ct_set_flow_flag(struct flow_action_entry *act,
1438 				    struct nfp_fl_ct_flow_entry *entry)
1439 {
1440 	switch (act->id) {
1441 	case FLOW_ACTION_CT:
1442 		if (act->ct.action == TCA_CT_ACT_NAT)
1443 			entry->flags |= NFP_FL_ACTION_DO_NAT;
1444 		break;
1445 
1446 	case FLOW_ACTION_MANGLE:
1447 		entry->flags |= NFP_FL_ACTION_DO_MANGLE;
1448 		break;
1449 
1450 	default:
1451 		break;
1452 	}
1453 
1454 	return 0;
1455 }
1456 
1457 static struct
1458 nfp_fl_ct_flow_entry *nfp_fl_ct_add_flow(struct nfp_fl_ct_zone_entry *zt,
1459 					 struct net_device *netdev,
1460 					 struct flow_cls_offload *flow,
1461 					 bool is_nft, struct netlink_ext_ack *extack)
1462 {
1463 	struct nf_flow_match *nft_match = NULL;
1464 	struct nfp_fl_ct_flow_entry *entry;
1465 	struct nfp_fl_ct_map_entry *map;
1466 	struct flow_action_entry *act;
1467 	int err, i;
1468 
1469 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1470 	if (!entry)
1471 		return ERR_PTR(-ENOMEM);
1472 
1473 	entry->rule = flow_rule_alloc(flow->rule->action.num_entries);
1474 	if (!entry->rule) {
1475 		err = -ENOMEM;
1476 		goto err_pre_ct_rule;
1477 	}
1478 
1479 	/* nft flows get destroyed after the callback returns, so we need
1480 	 * to do a full copy instead of just keeping a reference.
1481 	 */
1482 	if (is_nft) {
1483 		nft_match = kzalloc(sizeof(*nft_match), GFP_KERNEL);
1484 		if (!nft_match) {
1485 			err = -ENOMEM;
1486 			goto err_pre_ct_act;
1487 		}
1488 		memcpy(&nft_match->dissector, flow->rule->match.dissector,
1489 		       sizeof(nft_match->dissector));
1490 		memcpy(&nft_match->mask, flow->rule->match.mask,
1491 		       sizeof(nft_match->mask));
1492 		memcpy(&nft_match->key, flow->rule->match.key,
1493 		       sizeof(nft_match->key));
1494 		entry->rule->match.dissector = &nft_match->dissector;
1495 		entry->rule->match.mask = &nft_match->mask;
1496 		entry->rule->match.key = &nft_match->key;
1497 
1498 		if (!netdev)
1499 			netdev = get_netdev_from_rule(entry->rule);
1500 	} else {
1501 		entry->rule->match.dissector = flow->rule->match.dissector;
1502 		entry->rule->match.mask = flow->rule->match.mask;
1503 		entry->rule->match.key = flow->rule->match.key;
1504 	}
1505 
1506 	entry->zt = zt;
1507 	entry->netdev = netdev;
1508 	entry->cookie = flow->cookie > 0 ? flow->cookie : (unsigned long)entry;
1509 	entry->chain_index = flow->common.chain_index;
1510 	entry->tun_offset = NFP_FL_CT_NO_TUN;
1511 
1512 	/* Copy over the action data. Unfortunately we do not get a handle to
1513 	 * the original tcf_action data, and the flow objects get destroyed, so
1514 	 * we cannot just save a pointer to it either and have to copy over the
1515 	 * data.
1516 	 */
1517 	entry->rule->action.num_entries = flow->rule->action.num_entries;
1518 	flow_action_for_each(i, act, &flow->rule->action) {
1519 		struct flow_action_entry *new_act;
1520 
1521 		new_act = &entry->rule->action.entries[i];
1522 		memcpy(new_act, act, sizeof(struct flow_action_entry));
1523 		/* The nft entry mangle fields are in host byte order and need to be
1524 		 * translated to network byte order.
1525 		 */
1526 		if (is_nft)
1527 			nfp_nft_ct_translate_mangle_action(new_act);
1528 
1529 		nfp_nft_ct_set_flow_flag(new_act, entry);
1530 		/* Tunnel encap is a special case, we need to allocate and copy
1531 		 * the tunnel info.
1532 		 */
1533 		if (act->id == FLOW_ACTION_TUNNEL_ENCAP) {
1534 			struct ip_tunnel_info *tun = act->tunnel;
1535 			size_t tun_size = sizeof(*tun) + tun->options_len;
1536 
1537 			new_act->tunnel = kmemdup(tun, tun_size, GFP_ATOMIC);
1538 			if (!new_act->tunnel) {
1539 				err = -ENOMEM;
1540 				goto err_pre_ct_tun_cp;
1541 			}
1542 			entry->tun_offset = i;
1543 		}
1544 	}
1545 
1546 	INIT_LIST_HEAD(&entry->children);
1547 
1548 	if (flow->cookie == 0)
1549 		return entry;
1550 
1551 	/* Now add a ct map entry to flower-priv */
1552 	map = get_hashentry(&zt->priv->ct_map_table, &flow->cookie,
1553 			    nfp_ct_map_params, sizeof(*map));
1554 	if (IS_ERR(map)) {
1555 		NL_SET_ERR_MSG_MOD(extack,
1556 				   "offload error: ct map entry creation failed");
1557 		err = -ENOMEM;
1558 		goto err_ct_flow_insert;
1559 	}
1560 	map->cookie = flow->cookie;
1561 	map->ct_entry = entry;
1562 	err = rhashtable_insert_fast(&zt->priv->ct_map_table,
1563 				     &map->hash_node,
1564 				     nfp_ct_map_params);
1565 	if (err) {
1566 		NL_SET_ERR_MSG_MOD(extack,
1567 				   "offload error: ct map entry table add failed");
1568 		goto err_map_insert;
1569 	}
1570 
1571 	return entry;
1572 
1573 err_map_insert:
1574 	kfree(map);
1575 err_ct_flow_insert:
1576 	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
1577 		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
1578 err_pre_ct_tun_cp:
1579 	kfree(nft_match);
1580 err_pre_ct_act:
1581 	kfree(entry->rule);
1582 err_pre_ct_rule:
1583 	kfree(entry);
1584 	return ERR_PTR(err);
1585 }
1586 
1587 static void cleanup_nft_merge_entry(struct nfp_fl_nft_tc_merge *m_entry)
1588 {
1589 	struct nfp_fl_ct_zone_entry *zt;
1590 	int err;
1591 
1592 	zt = m_entry->zt;
1593 
1594 	/* Flow is in HW, need to delete */
1595 	if (m_entry->tc_flower_cookie) {
1596 		err = nfp_fl_ct_del_offload(zt->priv->app, m_entry->tc_flower_cookie,
1597 					    m_entry->netdev);
1598 		if (err)
1599 			return;
1600 	}
1601 
1602 	WARN_ON_ONCE(rhashtable_remove_fast(&zt->nft_merge_tb,
1603 					    &m_entry->hash_node,
1604 					    nfp_nft_ct_merge_params));
1605 	zt->nft_merge_count--;
1606 	list_del(&m_entry->tc_merge_list);
1607 	list_del(&m_entry->nft_flow_list);
1608 
1609 	if (m_entry->next_pre_ct_entry) {
1610 		struct nfp_fl_ct_map_entry pre_ct_map_ent;
1611 
1612 		pre_ct_map_ent.ct_entry = m_entry->next_pre_ct_entry;
1613 		pre_ct_map_ent.cookie = 0;
1614 		nfp_fl_ct_del_flow(&pre_ct_map_ent);
1615 	}
1616 
1617 	kfree(m_entry);
1618 }
1619 
1620 static void nfp_free_nft_merge_children(void *entry, bool is_nft_flow)
1621 {
1622 	struct nfp_fl_nft_tc_merge *m_entry, *tmp;
1623 
1624 	/* These nft merge entries are part of two lists, one is a list of
1625 	 * nft_entries and the other is from a list of tc_merge structures.
1626 	 * Iterate through the relevant list and clean up the entries.
1627 	 */
1628 
1629 	if (is_nft_flow) {
1630 		/* Need to iterate through list of nft_flow entries */
1631 		struct nfp_fl_ct_flow_entry *ct_entry = entry;
1632 
1633 		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
1634 					 nft_flow_list) {
1635 			cleanup_nft_merge_entry(m_entry);
1636 		}
1637 	} else {
1638 		/* Need to iterate through list of tc_merged_flow entries */
1639 		struct nfp_fl_ct_tc_merge *ct_entry = entry;
1640 
1641 		list_for_each_entry_safe(m_entry, tmp, &ct_entry->children,
1642 					 tc_merge_list) {
1643 			cleanup_nft_merge_entry(m_entry);
1644 		}
1645 	}
1646 }
1647 
1648 static void nfp_del_tc_merge_entry(struct nfp_fl_ct_tc_merge *m_ent)
1649 {
1650 	struct nfp_fl_ct_zone_entry *zt;
1651 	int err;
1652 
1653 	zt = m_ent->zt;
1654 	err = rhashtable_remove_fast(&zt->tc_merge_tb,
1655 				     &m_ent->hash_node,
1656 				     nfp_tc_ct_merge_params);
1657 	if (err)
1658 		pr_warn("WARNING: could not remove merge_entry from hashtable\n");
1659 	zt->tc_merge_count--;
1660 	list_del(&m_ent->post_ct_list);
1661 	list_del(&m_ent->pre_ct_list);
1662 
1663 	if (!list_empty(&m_ent->children))
1664 		nfp_free_nft_merge_children(m_ent, false);
1665 	kfree(m_ent);
1666 }
1667 
1668 static void nfp_free_tc_merge_children(struct nfp_fl_ct_flow_entry *entry)
1669 {
1670 	struct nfp_fl_ct_tc_merge *m_ent, *tmp;
1671 
1672 	switch (entry->type) {
1673 	case CT_TYPE_PRE_CT:
1674 		list_for_each_entry_safe(m_ent, tmp, &entry->children, pre_ct_list) {
1675 			nfp_del_tc_merge_entry(m_ent);
1676 		}
1677 		break;
1678 	case CT_TYPE_POST_CT:
1679 		list_for_each_entry_safe(m_ent, tmp, &entry->children, post_ct_list) {
1680 			nfp_del_tc_merge_entry(m_ent);
1681 		}
1682 		break;
1683 	default:
1684 		break;
1685 	}
1686 }
1687 
1688 void nfp_fl_ct_clean_flow_entry(struct nfp_fl_ct_flow_entry *entry)
1689 {
1690 	list_del(&entry->list_node);
1691 
1692 	if (!list_empty(&entry->children)) {
1693 		if (entry->type == CT_TYPE_NFT)
1694 			nfp_free_nft_merge_children(entry, true);
1695 		else
1696 			nfp_free_tc_merge_children(entry);
1697 	}
1698 
1699 	if (entry->tun_offset != NFP_FL_CT_NO_TUN)
1700 		kfree(entry->rule->action.entries[entry->tun_offset].tunnel);
1701 
1702 	if (entry->type == CT_TYPE_NFT) {
1703 		struct nf_flow_match *nft_match;
1704 
1705 		nft_match = container_of(entry->rule->match.dissector,
1706 					 struct nf_flow_match, dissector);
1707 		kfree(nft_match);
1708 	}
1709 
1710 	kfree(entry->rule);
1711 	kfree(entry);
1712 }
1713 
1714 static struct flow_action_entry *get_flow_act_ct(struct flow_rule *rule)
1715 {
1716 	struct flow_action_entry *act;
1717 	int i;
1718 
1719 	/* More than one ct action may be present in a flow rule,
1720 	 * return the first one that is not a CT clear action.
1721 	 */
1722 	flow_action_for_each(i, act, &rule->action) {
1723 		if (act->id == FLOW_ACTION_CT && act->ct.action != TCA_CT_ACT_CLEAR)
1724 			return act;
1725 	}
1726 
1727 	return NULL;
1728 }
1729 
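/* Return the first action of type @act_id in @rule, or NULL if not present. */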
1730 static struct flow_action_entry *get_flow_act(struct flow_rule *rule,
1731 					      enum flow_action_id act_id)
1732 {
1733 	struct flow_action_entry *act = NULL;
1734 	int i;
1735 
1736 	flow_action_for_each(i, act, &rule->action) {
1737 		if (act->id == act_id)
1738 			return act;
1739 	}
1740 	return NULL;
1741 }
1742 
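/* Try to merge a new pre_ct or post_ct entry with every entry of the
 * opposite type in @zt_src, creating tc_merge entries owned by @zt_dst.
 */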
1743 static void
1744 nfp_ct_merge_tc_entries(struct nfp_fl_ct_flow_entry *ct_entry1,
1745 			struct nfp_fl_ct_zone_entry *zt_src,
1746 			struct nfp_fl_ct_zone_entry *zt_dst)
1747 {
1748 	struct nfp_fl_ct_flow_entry *ct_entry2, *ct_tmp;
1749 	struct list_head *ct_list;
1750 
1751 	if (ct_entry1->type == CT_TYPE_PRE_CT)
1752 		ct_list = &zt_src->post_ct_list;
1753 	else if (ct_entry1->type == CT_TYPE_POST_CT)
1754 		ct_list = &zt_src->pre_ct_list;
1755 	else
1756 		return;
1757 
1758 	list_for_each_entry_safe(ct_entry2, ct_tmp, ct_list,
1759 				 list_node) {
1760 		nfp_ct_do_tc_merge(zt_dst, ct_entry2, ct_entry1);
1761 	}
1762 }
1763 
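/* Walk the zone's tc_merge table and try to merge the new nft entry with
 * each existing tc_merge entry. The walk is stopped around each merge so
 * that nfp_ct_do_nft_merge() is not called inside the walker's RCU section.
 */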
1764 static void
1765 nfp_ct_merge_nft_with_tc(struct nfp_fl_ct_flow_entry *nft_entry,
1766 			 struct nfp_fl_ct_zone_entry *zt)
1767 {
1768 	struct nfp_fl_ct_tc_merge *tc_merge_entry;
1769 	struct rhashtable_iter iter;
1770 
1771 	rhashtable_walk_enter(&zt->tc_merge_tb, &iter);
1772 	rhashtable_walk_start(&iter);
1773 	while ((tc_merge_entry = rhashtable_walk_next(&iter)) != NULL) {
1774 		if (IS_ERR(tc_merge_entry))
1775 			continue;
1776 		rhashtable_walk_stop(&iter);
1777 		nfp_ct_do_nft_merge(zt, nft_entry, tc_merge_entry);
1778 		rhashtable_walk_start(&iter);
1779 	}
1780 	rhashtable_walk_stop(&iter);
1781 	rhashtable_walk_exit(&iter);
1782 }
1783 
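/* Offload the pre_ct half of a conntrack flow: the tc rule that sends
 * packets to conntrack and then jumps to another chain. The zone table
 * entry is looked up or created from the ct action's zone, the driver's
 * callback is registered with the zone's nft flowtable on first use, and
 * the new entry is merged against existing post_ct entries (including the
 * wildcarded zone). A non-NULL @m_entry means the call comes from
 * nfp_fl_create_new_pre_ct() for an additional recirculation zone.
 *
 * Illustrative example of such a rule ($DEV is a placeholder, not a form
 * the driver requires verbatim):
 *   tc filter add dev $DEV ingress chain 0 protocol ip flower \
 *       ip_proto tcp action ct zone 5 pipe action goto chain 1
 */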
1784 int nfp_fl_ct_handle_pre_ct(struct nfp_flower_priv *priv,
1785 			    struct net_device *netdev,
1786 			    struct flow_cls_offload *flow,
1787 			    struct netlink_ext_ack *extack,
1788 			    struct nfp_fl_nft_tc_merge *m_entry)
1789 {
1790 	struct flow_action_entry *ct_act, *ct_goto;
1791 	struct nfp_fl_ct_flow_entry *ct_entry;
1792 	struct nfp_fl_ct_zone_entry *zt;
1793 	int err;
1794 
1795 	ct_act = get_flow_act_ct(flow->rule);
1796 	if (!ct_act) {
1797 		NL_SET_ERR_MSG_MOD(extack,
1798 				   "unsupported offload: Conntrack action empty in conntrack offload");
1799 		return -EOPNOTSUPP;
1800 	}
1801 
1802 	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
1803 	if (!ct_goto) {
1804 		NL_SET_ERR_MSG_MOD(extack,
1805 				   "unsupported offload: Conntrack requires ACTION_GOTO");
1806 		return -EOPNOTSUPP;
1807 	}
1808 
1809 	zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
1810 	if (IS_ERR(zt)) {
1811 		NL_SET_ERR_MSG_MOD(extack,
1812 				   "offload error: Could not create zone table entry");
1813 		return PTR_ERR(zt);
1814 	}
1815 
1816 	if (!zt->nft) {
1817 		zt->nft = ct_act->ct.flow_table;
1818 		err = nf_flow_table_offload_add_cb(zt->nft, nfp_fl_ct_handle_nft_flow, zt);
1819 		if (err) {
1820 			NL_SET_ERR_MSG_MOD(extack,
1821 					   "offload error: Could not register nft_callback");
1822 			return err;
1823 		}
1824 	}
1825 
1826 	/* Add entry to pre_ct_list */
1827 	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
1828 	if (IS_ERR(ct_entry))
1829 		return PTR_ERR(ct_entry);
1830 	ct_entry->type = CT_TYPE_PRE_CT;
1831 	ct_entry->chain_index = flow->common.chain_index;
1832 	ct_entry->goto_chain_index = ct_goto->chain_index;
1833 
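	/* This pre_ct entry is created on behalf of an earlier nft merge
	 * (an additional ct zone): record the chain of merge entries that
	 * leads here so stats can be propagated back through the previous
	 * zones, and link the merge entry to its new pre_ct child for
	 * teardown.
	 */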
1834 	if (m_entry) {
1835 		struct nfp_fl_ct_flow_entry *pre_ct_entry;
1836 		int i;
1837 
1838 		pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
1839 		for (i = 0; i < pre_ct_entry->num_prev_m_entries; i++)
1840 			ct_entry->prev_m_entries[i] = pre_ct_entry->prev_m_entries[i];
1841 		ct_entry->prev_m_entries[i++] = m_entry;
1842 		ct_entry->num_prev_m_entries = i;
1843 
1844 		m_entry->next_pre_ct_entry = ct_entry;
1845 	}
1846 
1847 	list_add(&ct_entry->list_node, &zt->pre_ct_list);
1848 	zt->pre_ct_count++;
1849 
1850 	nfp_ct_merge_tc_entries(ct_entry, zt, zt);
1851 
1852 	/* Need to check and merge with tables in the wc_zone as well */
1853 	if (priv->ct_zone_wc)
1854 		nfp_ct_merge_tc_entries(ct_entry, priv->ct_zone_wc, zt);
1855 
1856 	return 0;
1857 }
1858 
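/* Offload the post_ct half of a conntrack flow: the tc rule that matches
 * on conntrack state after recirculation. A fully wildcarded ct_zone match
 * lands in the wildcard zone table and is merged against the pre_ct
 * entries of every zone; otherwise only the matching zone is used.
 *
 * Illustrative example of such a rule ($DEV/$DEV2 are placeholders):
 *   tc filter add dev $DEV ingress chain 1 protocol ip flower \
 *       ct_state +trk+est ct_zone 5 \
 *       action mirred egress redirect dev $DEV2
 */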
1859 int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv,
1860 			     struct net_device *netdev,
1861 			     struct flow_cls_offload *flow,
1862 			     struct netlink_ext_ack *extack)
1863 {
1864 	struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
1865 	struct nfp_fl_ct_flow_entry *ct_entry;
1866 	struct nfp_fl_ct_zone_entry *zt;
1867 	bool wildcarded = false;
1868 	struct flow_match_ct ct;
1869 	struct flow_action_entry *ct_goto;
1870 
1871 	flow_rule_match_ct(rule, &ct);
1872 	if (!ct.mask->ct_zone) {
1873 		wildcarded = true;
1874 	} else if (ct.mask->ct_zone != U16_MAX) {
1875 		NL_SET_ERR_MSG_MOD(extack,
1876 				   "unsupported offload: partially wildcarded ct_zone is not supported");
1877 		return -EOPNOTSUPP;
1878 	}
1879 
1880 	zt = get_nfp_zone_entry(priv, ct.key->ct_zone, wildcarded);
1881 	if (IS_ERR(zt)) {
1882 		NL_SET_ERR_MSG_MOD(extack,
1883 				   "offload error: Could not create zone table entry");
1884 		return PTR_ERR(zt);
1885 	}
1886 
1887 	/* Add entry to post_ct_list */
1888 	ct_entry = nfp_fl_ct_add_flow(zt, netdev, flow, false, extack);
1889 	if (IS_ERR(ct_entry))
1890 		return PTR_ERR(ct_entry);
1891 
1892 	ct_entry->type = CT_TYPE_POST_CT;
1893 	ct_entry->chain_index = flow->common.chain_index;
1894 	ct_goto = get_flow_act(flow->rule, FLOW_ACTION_GOTO);
1895 	ct_entry->goto_chain_index = ct_goto ? ct_goto->chain_index : 0;
1896 	list_add(&ct_entry->list_node, &zt->post_ct_list);
1897 	zt->post_ct_count++;
1898 
1899 	if (wildcarded) {
1900 		/* The zone is wildcarded, so iterate through all zone tables
1901 		 * and merge with the pre_ct entries found in each of them.
1902 		 */
1903 		struct rhashtable_iter iter;
1904 		struct nfp_fl_ct_zone_entry *zone_table;
1905 
1906 		rhashtable_walk_enter(&priv->ct_zone_table, &iter);
1907 		rhashtable_walk_start(&iter);
1908 		while ((zone_table = rhashtable_walk_next(&iter)) != NULL) {
1909 			if (IS_ERR(zone_table))
1910 				continue;
1911 			rhashtable_walk_stop(&iter);
1912 			nfp_ct_merge_tc_entries(ct_entry, zone_table, zone_table);
1913 			rhashtable_walk_start(&iter);
1914 		}
1915 		rhashtable_walk_stop(&iter);
1916 		rhashtable_walk_exit(&iter);
1917 	} else {
1918 		nfp_ct_merge_tc_entries(ct_entry, zt, zt);
1919 	}
1920 
1921 	return 0;
1922 }
1923 
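/* Extend a merged flow into one more ct zone: build a synthetic
 * flow_cls_offload that reuses the post_ct parent's rule and chain index
 * and feed it back through nfp_fl_ct_handle_pre_ct() with @m_entry as the
 * originating merge. Fails if the supported number of recirculation zones
 * would be exceeded.
 */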
1924 int nfp_fl_create_new_pre_ct(struct nfp_fl_nft_tc_merge *m_entry)
1925 {
1926 	struct nfp_fl_ct_flow_entry *pre_ct_entry, *post_ct_entry;
1927 	struct flow_cls_offload new_pre_ct_flow;
1928 	int err;
1929 
1930 	pre_ct_entry = m_entry->tc_m_parent->pre_ct_parent;
1931 	if (pre_ct_entry->num_prev_m_entries >= NFP_MAX_RECIRC_CT_ZONES - 1)
1932 		return -1;
1933 
1934 	post_ct_entry = m_entry->tc_m_parent->post_ct_parent;
1935 	memset(&new_pre_ct_flow, 0, sizeof(struct flow_cls_offload));
1936 	new_pre_ct_flow.rule = post_ct_entry->rule;
1937 	new_pre_ct_flow.common.chain_index = post_ct_entry->chain_index;
1938 
1939 	err = nfp_fl_ct_handle_pre_ct(pre_ct_entry->zt->priv,
1940 				      pre_ct_entry->netdev,
1941 				      &new_pre_ct_flow, NULL,
1942 				      m_entry);
1943 	return err;
1944 }
1945 
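/* Fold the hardware stats of one offloaded nft merge flow into the
 * caller's counters, propagate them into the cached stats of the parent
 * pre_ct/post_ct/nft entries (and of any earlier recirculation zones),
 * then clear the per-context stats cached from the NFP so they are not
 * counted twice.
 */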
1946 static void
1947 nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge,
1948 		    enum ct_entry_type type, u64 *m_pkts,
1949 		    u64 *m_bytes, u64 *m_used)
1950 {
1951 	struct nfp_flower_priv *priv = nft_merge->zt->priv;
1952 	struct nfp_fl_payload *nfp_flow;
1953 	u32 ctx_id;
1954 
1955 	nfp_flow = nft_merge->flow_pay;
1956 	if (!nfp_flow)
1957 		return;
1958 
1959 	ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
1960 	*m_pkts += priv->stats[ctx_id].pkts;
1961 	*m_bytes += priv->stats[ctx_id].bytes;
1962 	*m_used = max_t(u64, *m_used, priv->stats[ctx_id].used);
1963 
1964 	/* If the request is for a sub_flow that is part of a tunnel merged
1965 	 * flow, then update the stats from the tunnel merged flows first.
1966 	 */
1967 	if (!list_empty(&nfp_flow->linked_flows))
1968 		nfp_flower_update_merge_stats(priv->app, nfp_flow);
1969 
1970 	if (type != CT_TYPE_NFT) {
1971 		/* Update nft cached stats */
1972 		flow_stats_update(&nft_merge->nft_parent->stats,
1973 				  priv->stats[ctx_id].bytes,
1974 				  priv->stats[ctx_id].pkts,
1975 				  0, priv->stats[ctx_id].used,
1976 				  FLOW_ACTION_HW_STATS_DELAYED);
1977 	} else {
1978 		/* Update pre_ct cached stats */
1979 		flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats,
1980 				  priv->stats[ctx_id].bytes,
1981 				  priv->stats[ctx_id].pkts,
1982 				  0, priv->stats[ctx_id].used,
1983 				  FLOW_ACTION_HW_STATS_DELAYED);
1984 		/* Update post_ct cached stats */
1985 		flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats,
1986 				  priv->stats[ctx_id].bytes,
1987 				  priv->stats[ctx_id].pkts,
1988 				  0, priv->stats[ctx_id].used,
1989 				  FLOW_ACTION_HW_STATS_DELAYED);
1990 	}
1991 
1992 	/* Update previous pre_ct/post_ct/nft flow stats */
1993 	if (nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries > 0) {
1994 		struct nfp_fl_nft_tc_merge *tmp_nft_merge;
1995 		int i;
1996 
1997 		for (i = 0; i < nft_merge->tc_m_parent->pre_ct_parent->num_prev_m_entries; i++) {
1998 			tmp_nft_merge = nft_merge->tc_m_parent->pre_ct_parent->prev_m_entries[i];
1999 			flow_stats_update(&tmp_nft_merge->tc_m_parent->pre_ct_parent->stats,
2000 					  priv->stats[ctx_id].bytes,
2001 					  priv->stats[ctx_id].pkts,
2002 					  0, priv->stats[ctx_id].used,
2003 					  FLOW_ACTION_HW_STATS_DELAYED);
2004 			flow_stats_update(&tmp_nft_merge->tc_m_parent->post_ct_parent->stats,
2005 					  priv->stats[ctx_id].bytes,
2006 					  priv->stats[ctx_id].pkts,
2007 					  0, priv->stats[ctx_id].used,
2008 					  FLOW_ACTION_HW_STATS_DELAYED);
2009 			flow_stats_update(&tmp_nft_merge->nft_parent->stats,
2010 					  priv->stats[ctx_id].bytes,
2011 					  priv->stats[ctx_id].pkts,
2012 					  0, priv->stats[ctx_id].used,
2013 					  FLOW_ACTION_HW_STATS_DELAYED);
2014 		}
2015 	}
2016 
2017 	/* Reset stats from the nfp */
2018 	priv->stats[ctx_id].pkts = 0;
2019 	priv->stats[ctx_id].bytes = 0;
2020 }
2021 
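/* Handle a FLOW_CLS_STATS request for a pre_ct, post_ct or nft entry:
 * sum the stats of every offloaded merge flow below it under the stats
 * lock, mirror them into the partner entry's cache, and report the
 * accumulated totals back on the original request.
 */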
2022 int nfp_fl_ct_stats(struct flow_cls_offload *flow,
2023 		    struct nfp_fl_ct_map_entry *ct_map_ent)
2024 {
2025 	struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry;
2026 	struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp;
2027 	struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp;
2028 
2029 	u64 pkts = 0, bytes = 0, used = 0;
2030 	u64 m_pkts, m_bytes, m_used;
2031 
2032 	spin_lock_bh(&ct_entry->zt->priv->stats_lock);
2033 
2034 	if (ct_entry->type == CT_TYPE_PRE_CT) {
2035 		/* Iterate tc_merge entries associated with this flow */
2036 		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
2037 					 pre_ct_list) {
2038 			m_pkts = 0;
2039 			m_bytes = 0;
2040 			m_used = 0;
2041 			/* Iterate nft_merge entries associated with this tc_merge flow */
2042 			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
2043 						 tc_merge_list) {
2044 				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT,
2045 						    &m_pkts, &m_bytes, &m_used);
2046 			}
2047 			pkts += m_pkts;
2048 			bytes += m_bytes;
2049 			used = max_t(u64, used, m_used);
2050 			/* Update post_ct partner */
2051 			flow_stats_update(&tc_merge->post_ct_parent->stats,
2052 					  m_bytes, m_pkts, 0, m_used,
2053 					  FLOW_ACTION_HW_STATS_DELAYED);
2054 		}
2055 	} else if (ct_entry->type == CT_TYPE_POST_CT) {
2056 		/* Iterate tc_merge entries associated with this flow */
2057 		list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children,
2058 					 post_ct_list) {
2059 			m_pkts = 0;
2060 			m_bytes = 0;
2061 			m_used = 0;
2062 			/* Iterate nft_merge entries associated with this tc_merge flow */
2063 			list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children,
2064 						 tc_merge_list) {
2065 				nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT,
2066 						    &m_pkts, &m_bytes, &m_used);
2067 			}
2068 			pkts += m_pkts;
2069 			bytes += m_bytes;
2070 			used = max_t(u64, used, m_used);
2071 			/* Update pre_ct partner */
2072 			flow_stats_update(&tc_merge->pre_ct_parent->stats,
2073 					  m_bytes, m_pkts, 0, m_used,
2074 					  FLOW_ACTION_HW_STATS_DELAYED);
2075 		}
2076 	} else {
2077 		/* Iterate nft_merge entries associated with this nft flow */
2078 		list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children,
2079 					 nft_flow_list) {
2080 			nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT,
2081 					    &pkts, &bytes, &used);
2082 		}
2083 	}
2084 
2085 	/* Add stats from this request to stats potentially cached by
2086 	 * previous requests.
2087 	 */
2088 	flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used,
2089 			  FLOW_ACTION_HW_STATS_DELAYED);
2090 	/* Finally report the cached flow stats back on the original stats request */
2091 	flow_stats_update(&flow->stats, ct_entry->stats.bytes,
2092 			  ct_entry->stats.pkts, 0,
2093 			  ct_entry->stats.lastused,
2094 			  FLOW_ACTION_HW_STATS_DELAYED);
2095 	/* The stats have been synced to the original flow, so the
2096 	 * cache can now be cleared.
2097 	 */
2098 	ct_entry->stats.pkts = 0;
2099 	ct_entry->stats.bytes = 0;
2100 	spin_unlock_bh(&ct_entry->zt->priv->stats_lock);
2101 
2102 	return 0;
2103 }
2104 
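/* Flows whose conntrack info is still IP_CT_NEW are not offloaded; the
 * ctinfo is carried in the low bits of the CT metadata cookie.
 */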
2105 static bool
2106 nfp_fl_ct_offload_nft_supported(struct flow_cls_offload *flow)
2107 {
2108 	struct flow_rule *flow_rule = flow->rule;
2109 	struct flow_action *flow_action =
2110 		&flow_rule->action;
2111 	struct flow_action_entry *act;
2112 	int i;
2113 
2114 	flow_action_for_each(i, act, flow_action) {
2115 		if (act->id == FLOW_ACTION_CT_METADATA) {
2116 			enum ip_conntrack_info ctinfo =
2117 				act->ct_metadata.cookie & NFCT_INFOMASK;
2118 
2119 			return ctinfo != IP_CT_NEW;
2120 		}
2121 	}
2122 
2123 	return false;
2124 }
2125 
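/* Handle an offload request coming from the netfilter flowtable for this
 * zone. Runs under RTNL (taken by the callback wrapper below): adds the
 * flow as an nft entry and merges it with existing tc merges, removes it,
 * or reports stats, keyed by the flow cookie in the ct_map table.
 */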
2126 static int
2127 nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow)
2128 {
2129 	struct nfp_fl_ct_map_entry *ct_map_ent;
2130 	struct nfp_fl_ct_flow_entry *ct_entry;
2131 	struct netlink_ext_ack *extack = NULL;
2132 
2133 	ASSERT_RTNL();
2134 
2135 	extack = flow->common.extack;
2136 	switch (flow->command) {
2137 	case FLOW_CLS_REPLACE:
2138 		if (!nfp_fl_ct_offload_nft_supported(flow))
2139 			return -EOPNOTSUPP;
2140 
2141 		/* Netfilter can request offload multiple times for the same
2142 		 * flow - protect against adding duplicates.
2143 		 */
2144 		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
2145 						    nfp_ct_map_params);
2146 		if (!ct_map_ent) {
2147 			ct_entry = nfp_fl_ct_add_flow(zt, NULL, flow, true, extack);
2148 			if (IS_ERR(ct_entry))
2149 				return PTR_ERR(ct_entry);
2150 			ct_entry->type = CT_TYPE_NFT;
2151 			list_add(&ct_entry->list_node, &zt->nft_flows_list);
2152 			zt->nft_flows_count++;
2153 			nfp_ct_merge_nft_with_tc(ct_entry, zt);
2154 		}
2155 		return 0;
2156 	case FLOW_CLS_DESTROY:
2157 		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
2158 						    nfp_ct_map_params);
2159 		return nfp_fl_ct_del_flow(ct_map_ent);
2160 	case FLOW_CLS_STATS:
2161 		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie,
2162 						    nfp_ct_map_params);
2163 		if (ct_map_ent)
2164 			return nfp_fl_ct_stats(flow, ct_map_ent);
2165 		break;
2166 	default:
2167 		break;
2168 	}
2169 	return -EINVAL;
2170 }
2171 
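/* Flow block callback registered with the zone's nft flowtable via
 * nf_flow_table_offload_add_cb(); takes RTNL and dispatches classifier
 * requests to nfp_fl_ct_offload_nft_flow().
 */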
2172 int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv)
2173 {
2174 	struct flow_cls_offload *flow = type_data;
2175 	struct nfp_fl_ct_zone_entry *zt = cb_priv;
2176 	int err = -EOPNOTSUPP;
2177 
2178 	switch (type) {
2179 	case TC_SETUP_CLSFLOWER:
2180 		rtnl_lock();
2181 		err = nfp_fl_ct_offload_nft_flow(zt, flow);
2182 		rtnl_unlock();
2183 		break;
2184 	default:
2185 		return -EOPNOTSUPP;
2186 	}
2187 	return err;
2188 }
2189 
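/* Delete every nft flow entry still linked to the zone; used once the
 * zone's last pre_ct flow has been removed and zt->nft has been cleared.
 */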
2190 static void
2191 nfp_fl_ct_clean_nft_entries(struct nfp_fl_ct_zone_entry *zt)
2192 {
2193 	struct nfp_fl_ct_flow_entry *nft_entry, *ct_tmp;
2194 	struct nfp_fl_ct_map_entry *ct_map_ent;
2195 
2196 	list_for_each_entry_safe(nft_entry, ct_tmp, &zt->nft_flows_list,
2197 				 list_node) {
2198 		ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table,
2199 						    &nft_entry->cookie,
2200 						    nfp_ct_map_params);
2201 		nfp_fl_ct_del_flow(ct_map_ent);
2202 	}
2203 }
2204 
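/* Tear down the flow entry referenced by a ct_map entry. For pre_ct
 * entries a cookie of 0 marks a synthetic entry created by
 * nfp_fl_create_new_pre_ct(), which has no hashtable node to remove and
 * whose map entry lives on the caller's stack, so neither is freed. When
 * the zone loses its last pre_ct flow its nft entries are cleaned up too.
 */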
2205 int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
2206 {
2207 	struct nfp_fl_ct_flow_entry *ct_entry;
2208 	struct nfp_fl_ct_zone_entry *zt;
2209 	struct rhashtable *m_table;
2210 
2211 	if (!ct_map_ent)
2212 		return -ENOENT;
2213 
2214 	zt = ct_map_ent->ct_entry->zt;
2215 	ct_entry = ct_map_ent->ct_entry;
2216 	m_table = &zt->priv->ct_map_table;
2217 
2218 	switch (ct_entry->type) {
2219 	case CT_TYPE_PRE_CT:
2220 		zt->pre_ct_count--;
2221 		if (ct_map_ent->cookie > 0)
2222 			rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
2223 					       nfp_ct_map_params);
2224 		nfp_fl_ct_clean_flow_entry(ct_entry);
2225 		if (ct_map_ent->cookie > 0)
2226 			kfree(ct_map_ent);
2227 
2228 		if (!zt->pre_ct_count) {
2229 			zt->nft = NULL;
2230 			nfp_fl_ct_clean_nft_entries(zt);
2231 		}
2232 		break;
2233 	case CT_TYPE_POST_CT:
2234 		zt->post_ct_count--;
2235 		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
2236 				       nfp_ct_map_params);
2237 		nfp_fl_ct_clean_flow_entry(ct_entry);
2238 		kfree(ct_map_ent);
2239 		break;
2240 	case CT_TYPE_NFT:
2241 		zt->nft_flows_count--;
2242 		rhashtable_remove_fast(m_table, &ct_map_ent->hash_node,
2243 				       nfp_ct_map_params);
2244 		nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry);
2245 		kfree(ct_map_ent);
2246 		break;
2247 	default:
2248 		break;
2249 	}
2250 
2251 	return 0;
2252 }
2253