xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c (revision 3fd6c59042dbba50391e30862beac979491145fe)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2019 Mellanox Technologies. */
3 
4 #include <net/netfilter/nf_conntrack.h>
5 #include <net/netfilter/nf_conntrack_core.h>
6 #include <net/netfilter/nf_conntrack_zones.h>
7 #include <net/netfilter/nf_conntrack_labels.h>
8 #include <net/netfilter/nf_conntrack_helper.h>
9 #include <net/netfilter/nf_conntrack_acct.h>
10 #include <uapi/linux/tc_act/tc_pedit.h>
11 #include <net/tc_act/tc_ct.h>
12 #include <net/flow_offload.h>
13 #include <net/netfilter/nf_flow_table.h>
14 #include <linux/workqueue.h>
15 #include <linux/refcount.h>
16 #include <linux/xarray.h>
17 #include <linux/if_macvlan.h>
18 #include <linux/debugfs.h>
19 
20 #include "lib/fs_chains.h"
21 #include "en/tc_ct.h"
22 #include "en/tc/ct_fs.h"
23 #include "en/tc_priv.h"
24 #include "en/mod_hdr.h"
25 #include "en/mapping.h"
26 #include "en/tc/post_act.h"
27 #include "en.h"
28 #include "en_tc.h"
29 #include "en_rep.h"
30 #include "fs_core.h"
31 
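/* Connection tracking state bits as written into the CTSTATE register by
 * mlx5_tc_ct_entry_set_registers() and matched on by mlx5_tc_ct_match_add().
 * Note the encoding starts at BIT(1).
 */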
32 #define MLX5_CT_STATE_ESTABLISHED_BIT BIT(1)
33 #define MLX5_CT_STATE_TRK_BIT BIT(2)
34 #define MLX5_CT_STATE_NAT_BIT BIT(3)
35 #define MLX5_CT_STATE_REPLY_BIT BIT(4)
36 #define MLX5_CT_STATE_RELATED_BIT BIT(5)
37 #define MLX5_CT_STATE_INVALID_BIT BIT(6)
38 #define MLX5_CT_STATE_NEW_BIT BIT(7)
39 
40 #define MLX5_CT_LABELS_BITS MLX5_REG_MAPPING_MBITS(LABELS_TO_REG)
41 #define MLX5_CT_LABELS_MASK MLX5_REG_MAPPING_MASK(LABELS_TO_REG)
42 
43 /* Statically allocate modify actions for
44  * ipv6 and port nat (5) + tuple fields (4) + nic mode zone restore (1) = 10.
45  * This will be increased dynamically if needed (for the ipv6 snat + dnat).
46  */
47 #define MLX5_CT_MIN_MOD_ACTS 10
48 
49 #define ct_dbg(fmt, args...)\
50 	netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
51 
52 struct mlx5_tc_ct_debugfs {
53 	struct {
54 		atomic_t offloaded;
55 		atomic_t rx_dropped;
56 	} stats;
57 
58 	struct dentry *root;
59 };
60 
61 struct mlx5_tc_ct_priv {
62 	struct mlx5_core_dev *dev;
63 	struct mlx5e_priv *priv;
64 	const struct net_device *netdev;
65 	struct mod_hdr_tbl *mod_hdr_tbl;
66 	struct xarray tuple_ids;
67 	struct rhashtable zone_ht;
68 	struct rhashtable ct_tuples_ht;
69 	struct rhashtable ct_tuples_nat_ht;
70 	struct mlx5_flow_table *ct;
71 	struct mlx5_flow_table *ct_nat;
72 	struct mlx5_flow_group *ct_nat_miss_group;
73 	struct mlx5_flow_handle *ct_nat_miss_rule;
74 	struct mlx5e_post_act *post_act;
75 	struct mutex control_lock; /* guards parallel adds/dels */
76 	struct mapping_ctx *zone_mapping;
77 	struct mapping_ctx *labels_mapping;
78 	enum mlx5_flow_namespace_type ns_type;
79 	struct mlx5_fs_chains *chains;
80 	struct mlx5_ct_fs *fs;
81 	struct mlx5_ct_fs_ops *fs_ops;
82 	spinlock_t ht_lock; /* protects ft entries */
83 	struct workqueue_struct *wq;
84 
85 	struct mlx5_tc_ct_debugfs debugfs;
86 };
87 
88 struct mlx5_ct_zone_rule {
89 	struct mlx5_ct_fs_rule *rule;
90 	struct mlx5e_mod_hdr_handle *mh;
91 	struct mlx5_flow_attr *attr;
92 	bool nat;
93 };
94 
95 struct mlx5_tc_ct_pre {
96 	struct mlx5_flow_table *ft;
97 	struct mlx5_flow_group *flow_grp;
98 	struct mlx5_flow_group *miss_grp;
99 	struct mlx5_flow_handle *flow_rule;
100 	struct mlx5_flow_handle *miss_rule;
101 	struct mlx5_modify_hdr *modify_hdr;
102 };
103 
104 struct mlx5_ct_ft {
105 	struct rhash_head node;
106 	u16 zone;
107 	u32 zone_restore_id;
108 	refcount_t refcount;
109 	struct nf_flowtable *nf_ft;
110 	struct mlx5_tc_ct_priv *ct_priv;
111 	struct rhashtable ct_entries_ht;
112 	struct mlx5_tc_ct_pre pre_ct;
113 	struct mlx5_tc_ct_pre pre_ct_nat;
114 };
115 
116 struct mlx5_ct_tuple {
117 	u16 addr_type;
118 	__be16 n_proto;
119 	u8 ip_proto;
120 	struct {
121 		union {
122 			__be32 src_v4;
123 			struct in6_addr src_v6;
124 		};
125 		union {
126 			__be32 dst_v4;
127 			struct in6_addr dst_v6;
128 		};
129 	} ip;
130 	struct {
131 		__be16 src;
132 		__be16 dst;
133 	} port;
134 
135 	u16 zone;
136 };
137 
138 struct mlx5_ct_counter {
139 	struct mlx5_fc *counter;
140 	refcount_t refcount;
141 	bool is_shared;
142 };
143 
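/* Bit numbers for the entry->flags bitmap below, used with {test,set}_bit():
 * VALID marks a fully offloaded entry; the IN_CT{,_NAT}_TABLE bits record
 * which tuple hashtables (and hence which rules) the entry was added to.
 */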
144 enum {
145 	MLX5_CT_ENTRY_FLAG_VALID,
146 	MLX5_CT_ENTRY_IN_CT_TABLE,
147 	MLX5_CT_ENTRY_IN_CT_NAT_TABLE,
148 };
149 
150 struct mlx5_ct_entry {
151 	struct rhash_head node;
152 	struct rhash_head tuple_node;
153 	struct rhash_head tuple_nat_node;
154 	struct mlx5_ct_counter *counter;
155 	unsigned long cookie;
156 	unsigned long restore_cookie;
157 	struct mlx5_ct_tuple tuple;
158 	struct mlx5_ct_tuple tuple_nat;
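	/* zone_rules[nat]: index 0 is the plain rule, index 1 the NAT rule */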
159 	struct mlx5_ct_zone_rule zone_rules[2];
160 
161 	struct mlx5_tc_ct_priv *ct_priv;
162 	struct work_struct work;
163 
164 	refcount_t refcnt;
165 	unsigned long flags;
166 };
167 
168 static void
169 mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
170 				 struct mlx5_flow_attr *attr,
171 				 struct mlx5e_mod_hdr_handle *mh);
172 
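/* Three lookup tables over the same entries: ct_entries_ht (per zone, in
 * struct mlx5_ct_ft) is keyed by the flow_cls_offload cookie, while the two
 * ct_priv-wide tables below are keyed by the pre-NAT and post-NAT 5-tuples
 * and are used by mlx5_tc_ct_entry_get() for tuple-based lookups, e.g. to
 * share a counter with the reverse direction of a connection.
 */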
173 static const struct rhashtable_params cts_ht_params = {
174 	.head_offset = offsetof(struct mlx5_ct_entry, node),
175 	.key_offset = offsetof(struct mlx5_ct_entry, cookie),
176 	.key_len = sizeof(((struct mlx5_ct_entry *)0)->cookie),
177 	.automatic_shrinking = true,
178 	.min_size = 16 * 1024,
179 };
180 
181 static const struct rhashtable_params zone_params = {
182 	.head_offset = offsetof(struct mlx5_ct_ft, node),
183 	.key_offset = offsetof(struct mlx5_ct_ft, zone),
184 	.key_len = sizeof(((struct mlx5_ct_ft *)0)->zone),
185 	.automatic_shrinking = true,
186 };
187 
188 static const struct rhashtable_params tuples_ht_params = {
189 	.head_offset = offsetof(struct mlx5_ct_entry, tuple_node),
190 	.key_offset = offsetof(struct mlx5_ct_entry, tuple),
191 	.key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple),
192 	.automatic_shrinking = true,
193 	.min_size = 16 * 1024,
194 };
195 
196 static const struct rhashtable_params tuples_nat_ht_params = {
197 	.head_offset = offsetof(struct mlx5_ct_entry, tuple_nat_node),
198 	.key_offset = offsetof(struct mlx5_ct_entry, tuple_nat),
199 	.key_len = sizeof(((struct mlx5_ct_entry *)0)->tuple_nat),
200 	.automatic_shrinking = true,
201 	.min_size = 16 * 1024,
202 };
203 
204 static bool
205 mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
206 {
207 	return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
208 }
209 
210 static bool
211 mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
212 {
213 	return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
214 }
215 
216 static int
217 mlx5_get_label_mapping(struct mlx5_tc_ct_priv *ct_priv,
218 		       u32 *labels, u32 *id)
219 {
220 	if (!memchr_inv(labels, 0, sizeof(u32) * 4)) {
221 		*id = 0;
222 		return 0;
223 	}
224 
225 	if (mapping_add(ct_priv->labels_mapping, labels, id))
226 		return -EOPNOTSUPP;
227 
228 	return 0;
229 }
230 
231 static void
232 mlx5_put_label_mapping(struct mlx5_tc_ct_priv *ct_priv, u32 id)
233 {
234 	if (id)
235 		mapping_remove(ct_priv->labels_mapping, id);
236 }
237 
238 static int
239 mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
240 {
241 	struct flow_match_control control;
242 	struct flow_match_basic basic;
243 
244 	flow_rule_match_basic(rule, &basic);
245 	flow_rule_match_control(rule, &control);
246 
247 	tuple->n_proto = basic.key->n_proto;
248 	tuple->ip_proto = basic.key->ip_proto;
249 	tuple->addr_type = control.key->addr_type;
250 
251 	if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
252 		struct flow_match_ipv4_addrs match;
253 
254 		flow_rule_match_ipv4_addrs(rule, &match);
255 		tuple->ip.src_v4 = match.key->src;
256 		tuple->ip.dst_v4 = match.key->dst;
257 	} else if (tuple->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
258 		struct flow_match_ipv6_addrs match;
259 
260 		flow_rule_match_ipv6_addrs(rule, &match);
261 		tuple->ip.src_v6 = match.key->src;
262 		tuple->ip.dst_v6 = match.key->dst;
263 	} else {
264 		return -EOPNOTSUPP;
265 	}
266 
267 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
268 		struct flow_match_ports match;
269 
270 		flow_rule_match_ports(rule, &match);
271 		switch (tuple->ip_proto) {
272 		case IPPROTO_TCP:
273 		case IPPROTO_UDP:
274 			tuple->port.src = match.key->src;
275 			tuple->port.dst = match.key->dst;
276 			break;
277 		default:
278 			return -EOPNOTSUPP;
279 		}
280 	} else {
281 		if (tuple->ip_proto != IPPROTO_GRE)
282 			return -EOPNOTSUPP;
283 	}
284 
285 	return 0;
286 }
287 
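/* Build the post-NAT tuple by replaying the flow rule's mangle actions on a
 * copy of the original tuple; anything other than IPv4/IPv6 address and
 * TCP/UDP port rewrites is rejected as unsupported.
 */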
288 static int
289 mlx5_tc_ct_rule_to_tuple_nat(struct mlx5_ct_tuple *tuple,
290 			     struct flow_rule *rule)
291 {
292 	struct flow_action *flow_action = &rule->action;
293 	struct flow_action_entry *act;
294 	u32 offset, val, ip6_offset;
295 	int i;
296 
297 	flow_action_for_each(i, act, flow_action) {
298 		if (act->id != FLOW_ACTION_MANGLE)
299 			continue;
300 
301 		offset = act->mangle.offset;
302 		val = act->mangle.val;
303 		switch (act->mangle.htype) {
304 		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
305 			if (offset == offsetof(struct iphdr, saddr))
306 				tuple->ip.src_v4 = cpu_to_be32(val);
307 			else if (offset == offsetof(struct iphdr, daddr))
308 				tuple->ip.dst_v4 = cpu_to_be32(val);
309 			else
310 				return -EOPNOTSUPP;
311 			break;
312 
313 		case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
314 			ip6_offset = (offset - offsetof(struct ipv6hdr, saddr));
315 			ip6_offset /= 4;
316 			if (ip6_offset < 4)
317 				tuple->ip.src_v6.s6_addr32[ip6_offset] = cpu_to_be32(val);
318 			else if (ip6_offset < 8)
319 				tuple->ip.dst_v6.s6_addr32[ip6_offset - 4] = cpu_to_be32(val);
320 			else
321 				return -EOPNOTSUPP;
322 			break;
323 
324 		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
325 			if (offset == offsetof(struct tcphdr, source))
326 				tuple->port.src = cpu_to_be16(val);
327 			else if (offset == offsetof(struct tcphdr, dest))
328 				tuple->port.dst = cpu_to_be16(val);
329 			else
330 				return -EOPNOTSUPP;
331 			break;
332 
333 		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
334 			if (offset == offsetof(struct udphdr, source))
335 				tuple->port.src = cpu_to_be16(val);
336 			else if (offset == offsetof(struct udphdr, dest))
337 				tuple->port.dst = cpu_to_be16(val);
338 			else
339 				return -EOPNOTSUPP;
340 			break;
341 
342 		default:
343 			return -EOPNOTSUPP;
344 		}
345 	}
346 
347 	return 0;
348 }
349 
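/* Resolve the flow_source hint for the tuple's ingress device: VF and uplink
 * representors on the same HW map to LOCAL_VPORT and UPLINK respectively,
 * stacked vlan/macvlan devices recurse into their lower device, and tunnel
 * or LAG master devices are treated as arriving from the uplink.
 */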
350 static int
351 mlx5_tc_ct_get_flow_source_match(struct mlx5_tc_ct_priv *ct_priv,
352 				 struct net_device *ndev)
353 {
354 	struct mlx5e_priv *other_priv = netdev_priv(ndev);
355 	struct mlx5_core_dev *mdev = ct_priv->dev;
356 	bool vf_rep, uplink_rep;
357 
358 	vf_rep = mlx5e_eswitch_vf_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
359 	uplink_rep = mlx5e_eswitch_uplink_rep(ndev) && mlx5_same_hw_devs(mdev, other_priv->mdev);
360 
361 	if (vf_rep)
362 		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
363 	if (uplink_rep)
364 		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
365 	if (is_vlan_dev(ndev))
366 		return mlx5_tc_ct_get_flow_source_match(ct_priv, vlan_dev_real_dev(ndev));
367 	if (netif_is_macvlan(ndev))
368 		return mlx5_tc_ct_get_flow_source_match(ct_priv, macvlan_dev_real_dev(ndev));
369 	if (mlx5e_get_tc_tun(ndev) || netif_is_lag_master(ndev))
370 		return MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
371 
372 	return MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT;
373 }
374 
375 static int
376 mlx5_tc_ct_set_tuple_match(struct mlx5_tc_ct_priv *ct_priv,
377 			   struct mlx5_flow_spec *spec,
378 			   struct flow_rule *rule)
379 {
380 	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
381 				       outer_headers);
382 	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
383 				       outer_headers);
384 	u16 addr_type = 0;
385 	u8 ip_proto = 0;
386 
387 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
388 		struct flow_match_basic match;
389 
390 		flow_rule_match_basic(rule, &match);
391 
392 		mlx5e_tc_set_ethertype(ct_priv->dev, &match, true, headers_c, headers_v);
393 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
394 			 match.mask->ip_proto);
395 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
396 			 match.key->ip_proto);
397 
398 		ip_proto = match.key->ip_proto;
399 	}
400 
401 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
402 		struct flow_match_control match;
403 
404 		flow_rule_match_control(rule, &match);
405 		addr_type = match.key->addr_type;
406 	}
407 
408 	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
409 		struct flow_match_ipv4_addrs match;
410 
411 		flow_rule_match_ipv4_addrs(rule, &match);
412 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
413 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
414 		       &match.mask->src, sizeof(match.mask->src));
415 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
416 				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
417 		       &match.key->src, sizeof(match.key->src));
418 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
419 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
420 		       &match.mask->dst, sizeof(match.mask->dst));
421 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
422 				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
423 		       &match.key->dst, sizeof(match.key->dst));
424 	}
425 
426 	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
427 		struct flow_match_ipv6_addrs match;
428 
429 		flow_rule_match_ipv6_addrs(rule, &match);
430 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
431 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
432 		       &match.mask->src, sizeof(match.mask->src));
433 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
434 				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
435 		       &match.key->src, sizeof(match.key->src));
436 
437 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
438 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
439 		       &match.mask->dst, sizeof(match.mask->dst));
440 		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
441 				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
442 		       &match.key->dst, sizeof(match.key->dst));
443 	}
444 
445 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
446 		struct flow_match_ports match;
447 
448 		flow_rule_match_ports(rule, &match);
449 		switch (ip_proto) {
450 		case IPPROTO_TCP:
451 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
452 				 tcp_sport, ntohs(match.mask->src));
453 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
454 				 tcp_sport, ntohs(match.key->src));
455 
456 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
457 				 tcp_dport, ntohs(match.mask->dst));
458 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
459 				 tcp_dport, ntohs(match.key->dst));
460 			break;
461 
462 		case IPPROTO_UDP:
463 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
464 				 udp_sport, ntohs(match.mask->src));
465 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
466 				 udp_sport, ntohs(match.key->src));
467 
468 			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
469 				 udp_dport, ntohs(match.mask->dst));
470 			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
471 				 udp_dport, ntohs(match.key->dst));
472 			break;
473 		default:
474 			break;
475 		}
476 	}
477 
478 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
479 		struct flow_match_tcp match;
480 
481 		flow_rule_match_tcp(rule, &match);
482 		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
483 			 ntohs(match.mask->flags));
484 		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
485 			 ntohs(match.key->flags));
486 	}
487 
488 	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
489 		struct flow_match_meta match;
490 
491 		flow_rule_match_meta(rule, &match);
492 
493 		if (match.key->ingress_ifindex & match.mask->ingress_ifindex) {
494 			struct net_device *dev;
495 
496 			dev = dev_get_by_index(&init_net, match.key->ingress_ifindex);
497 			if (dev && MLX5_CAP_ESW_FLOWTABLE(ct_priv->dev, flow_source))
498 				spec->flow_context.flow_source =
499 					mlx5_tc_ct_get_flow_source_match(ct_priv, dev);
500 
501 			dev_put(dev);
502 		}
503 	}
504 
505 	return 0;
506 }
507 
508 static void
509 mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
510 {
511 	if (entry->counter->is_shared &&
512 	    !refcount_dec_and_test(&entry->counter->refcount))
513 		return;
514 
515 	mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
516 	kfree(entry->counter);
517 }
518 
519 static void
520 mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
521 			  struct mlx5_ct_entry *entry,
522 			  bool nat)
523 {
524 	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
525 	struct mlx5_flow_attr *attr = zone_rule->attr;
526 
527 	ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
528 
529 	ct_priv->fs_ops->ct_rule_del(ct_priv->fs, zone_rule->rule);
530 	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, zone_rule->mh);
531 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
532 	kfree(attr);
533 }
534 
535 static void
536 mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
537 			   struct mlx5_ct_entry *entry)
538 {
539 	if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
540 		mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
541 	if (mlx5_tc_ct_entry_in_ct_table(entry))
542 		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
543 
544 	atomic_dec(&ct_priv->debugfs.stats.offloaded);
545 }
546 
547 static struct flow_action_entry *
548 mlx5_tc_ct_get_ct_metadata_action(struct flow_rule *flow_rule)
549 {
550 	struct flow_action *flow_action = &flow_rule->action;
551 	struct flow_action_entry *act;
552 	int i;
553 
554 	flow_action_for_each(i, act, flow_action) {
555 		if (act->id == FLOW_ACTION_CT_METADATA)
556 			return act;
557 	}
558 
559 	return NULL;
560 }
561 
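/* Write the ct metadata (state bits, mark, labels mapping id, zone restore
 * id) into the mapped registers via mlx5e_tc_match_to_reg_set(), so that
 * later rules and the miss path can match on or restore them.
 */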
562 static int
563 mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
564 			       struct mlx5e_tc_mod_hdr_acts *mod_acts,
565 			       u8 ct_state,
566 			       u32 mark,
567 			       u32 labels_id,
568 			       u8 zone_restore_id)
569 {
570 	enum mlx5_flow_namespace_type ns = ct_priv->ns_type;
571 	struct mlx5_core_dev *dev = ct_priv->dev;
572 	int err;
573 
574 	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
575 					CTSTATE_TO_REG, ct_state);
576 	if (err)
577 		return err;
578 
579 	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
580 					MARK_TO_REG, mark);
581 	if (err)
582 		return err;
583 
584 	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
585 					LABELS_TO_REG, labels_id);
586 	if (err)
587 		return err;
588 
589 	err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
590 					ZONE_RESTORE_TO_REG, zone_restore_id);
591 	if (err)
592 		return err;
593 
594 	/* Make another copy of zone id in reg_b for
595 	 * NIC rx flows since we don't copy reg_c1 to
596 	 * reg_b upon miss.
597 	 */
598 	if (ns != MLX5_FLOW_NAMESPACE_FDB) {
599 		err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
600 						NIC_ZONE_RESTORE_TO_REG, zone_restore_id);
601 		if (err)
602 			return err;
603 	}
604 	return 0;
605 }
606 
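/* Translate one pedit mangle action into an mlx5 set_action_in modify-header
 * action. IPv6 addresses are rewritten 32 bits at a time, so the mangle
 * offset selects which word of saddr/daddr is being set.
 */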
607 static int
608 mlx5_tc_ct_parse_mangle_to_mod_act(struct flow_action_entry *act,
609 				   char *modact)
610 {
611 	u32 offset = act->mangle.offset, field;
612 
613 	switch (act->mangle.htype) {
614 	case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
615 		MLX5_SET(set_action_in, modact, length, 0);
616 		if (offset == offsetof(struct iphdr, saddr))
617 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV4;
618 		else if (offset == offsetof(struct iphdr, daddr))
619 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV4;
620 		else
621 			return -EOPNOTSUPP;
622 		break;
623 
624 	case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
625 		MLX5_SET(set_action_in, modact, length, 0);
626 		if (offset == offsetof(struct ipv6hdr, saddr) + 12)
627 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0;
628 		else if (offset == offsetof(struct ipv6hdr, saddr) + 8)
629 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32;
630 		else if (offset == offsetof(struct ipv6hdr, saddr) + 4)
631 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64;
632 		else if (offset == offsetof(struct ipv6hdr, saddr))
633 			field = MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96;
634 		else if (offset == offsetof(struct ipv6hdr, daddr) + 12)
635 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0;
636 		else if (offset == offsetof(struct ipv6hdr, daddr) + 8)
637 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32;
638 		else if (offset == offsetof(struct ipv6hdr, daddr) + 4)
639 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64;
640 		else if (offset == offsetof(struct ipv6hdr, daddr))
641 			field = MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96;
642 		else
643 			return -EOPNOTSUPP;
644 		break;
645 
646 	case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
647 		MLX5_SET(set_action_in, modact, length, 16);
648 		if (offset == offsetof(struct tcphdr, source))
649 			field = MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT;
650 		else if (offset == offsetof(struct tcphdr, dest))
651 			field = MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT;
652 		else
653 			return -EOPNOTSUPP;
654 		break;
655 
656 	case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
657 		MLX5_SET(set_action_in, modact, length, 16);
658 		if (offset == offsetof(struct udphdr, source))
659 			field = MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT;
660 		else if (offset == offsetof(struct udphdr, dest))
661 			field = MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT;
662 		else
663 			return -EOPNOTSUPP;
664 		break;
665 
666 	default:
667 		return -EOPNOTSUPP;
668 	}
669 
670 	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
671 	MLX5_SET(set_action_in, modact, offset, 0);
672 	MLX5_SET(set_action_in, modact, field, field);
673 	MLX5_SET(set_action_in, modact, data, act->mangle.val);
674 
675 	return 0;
676 }
677 
678 static int
679 mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
680 			    struct flow_rule *flow_rule,
681 			    struct mlx5e_tc_mod_hdr_acts *mod_acts)
682 {
683 	struct flow_action *flow_action = &flow_rule->action;
684 	struct mlx5_core_dev *mdev = ct_priv->dev;
685 	struct flow_action_entry *act;
686 	char *modact;
687 	int err, i;
688 
689 	flow_action_for_each(i, act, flow_action) {
690 		switch (act->id) {
691 		case FLOW_ACTION_MANGLE: {
692 			modact = mlx5e_mod_hdr_alloc(mdev, ct_priv->ns_type, mod_acts);
693 			if (IS_ERR(modact))
694 				return PTR_ERR(modact);
695 
696 			err = mlx5_tc_ct_parse_mangle_to_mod_act(act, modact);
697 			if (err)
698 				return err;
699 
700 			mod_acts->num_actions++;
701 		}
702 		break;
703 
704 		case FLOW_ACTION_CT_METADATA:
705 			/* Handled earlier */
706 			continue;
707 		default:
708 			return -EOPNOTSUPP;
709 		}
710 	}
711 
712 	return 0;
713 }
714 
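/* Build the modify-header for a ct entry: NAT rewrites first (if any), then
 * the register sets carrying the ct metadata. NAT entries allocate a
 * dedicated modify header rather than attaching to the shared mod_hdr table,
 * as the per-connection rewrite makes sharing unlikely to hit.
 */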
715 static int
716 mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
717 				struct mlx5_flow_attr *attr,
718 				struct flow_rule *flow_rule,
719 				struct mlx5e_mod_hdr_handle **mh,
720 				u8 zone_restore_id, bool nat_table, bool has_nat)
721 {
722 	DECLARE_MOD_HDR_ACTS_ACTIONS(actions_arr, MLX5_CT_MIN_MOD_ACTS);
723 	DECLARE_MOD_HDR_ACTS(mod_acts, actions_arr);
724 	struct flow_action_entry *meta;
725 	enum ip_conntrack_info ctinfo;
726 	u16 ct_state = 0;
727 	int err;
728 
729 	meta = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
730 	if (!meta)
731 		return -EOPNOTSUPP;
732 	ctinfo = meta->ct_metadata.cookie & NFCT_INFOMASK;
733 
734 	err = mlx5_get_label_mapping(ct_priv, meta->ct_metadata.labels,
735 				     &attr->ct_attr.ct_labels_id);
736 	if (err)
737 		return -EOPNOTSUPP;
738 	if (nat_table) {
739 		if (has_nat) {
740 			err = mlx5_tc_ct_entry_create_nat(ct_priv, flow_rule, &mod_acts);
741 			if (err)
742 				goto err_mapping;
743 		}
744 
745 		ct_state |= MLX5_CT_STATE_NAT_BIT;
746 	}
747 
748 	ct_state |= MLX5_CT_STATE_TRK_BIT;
749 	ct_state |= ctinfo == IP_CT_NEW ? MLX5_CT_STATE_NEW_BIT : MLX5_CT_STATE_ESTABLISHED_BIT;
750 	ct_state |= meta->ct_metadata.orig_dir ? 0 : MLX5_CT_STATE_REPLY_BIT;
751 	err = mlx5_tc_ct_entry_set_registers(ct_priv, &mod_acts,
752 					     ct_state,
753 					     meta->ct_metadata.mark,
754 					     attr->ct_attr.ct_labels_id,
755 					     zone_restore_id);
756 	if (err)
757 		goto err_mapping;
758 
759 	if (nat_table && has_nat) {
760 		attr->modify_hdr = mlx5_modify_header_alloc(ct_priv->dev, ct_priv->ns_type,
761 							    mod_acts.num_actions,
762 							    mod_acts.actions);
763 		if (IS_ERR(attr->modify_hdr)) {
764 			err = PTR_ERR(attr->modify_hdr);
765 			goto err_mapping;
766 		}
767 
768 		*mh = NULL;
769 	} else {
770 		*mh = mlx5e_mod_hdr_attach(ct_priv->dev,
771 					   ct_priv->mod_hdr_tbl,
772 					   ct_priv->ns_type,
773 					   &mod_acts);
774 		if (IS_ERR(*mh)) {
775 			err = PTR_ERR(*mh);
776 			goto err_mapping;
777 		}
778 		attr->modify_hdr = mlx5e_mod_hdr_get(*mh);
779 	}
780 
781 	mlx5e_mod_hdr_dealloc(&mod_acts);
782 	return 0;
783 
784 err_mapping:
785 	mlx5e_mod_hdr_dealloc(&mod_acts);
786 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
787 	return err;
788 }
789 
790 static void
791 mlx5_tc_ct_entry_destroy_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
792 				 struct mlx5_flow_attr *attr,
793 				 struct mlx5e_mod_hdr_handle *mh)
794 {
795 	if (mh)
796 		mlx5e_mod_hdr_detach(ct_priv->dev, ct_priv->mod_hdr_tbl, mh);
797 	else
798 		mlx5_modify_header_dealloc(ct_priv->dev, attr->modify_hdr);
799 }
800 
801 static int
802 mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
803 			  struct flow_rule *flow_rule,
804 			  struct mlx5_ct_entry *entry,
805 			  bool nat, u8 zone_restore_id)
806 {
807 	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
808 	struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
809 	struct mlx5_flow_spec *spec = NULL;
810 	struct mlx5_flow_attr *attr;
811 	int err;
812 
813 	zone_rule->nat = nat;
814 
815 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
816 	if (!spec)
817 		return -ENOMEM;
818 
819 	attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
820 	if (!attr) {
821 		err = -ENOMEM;
822 		goto err_attr;
823 	}
824 
825 	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
826 					      &zone_rule->mh,
827 					      zone_restore_id,
828 					      nat,
829 					      mlx5_tc_ct_entry_in_ct_nat_table(entry));
830 	if (err) {
831 		ct_dbg("Failed to create ct entry mod hdr");
832 		goto err_mod_hdr;
833 	}
834 
835 	attr->action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
836 		       MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
837 		       MLX5_FLOW_CONTEXT_ACTION_COUNT;
838 	attr->dest_chain = 0;
839 	attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
840 	attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
841 	if (entry->tuple.ip_proto == IPPROTO_TCP ||
842 	    entry->tuple.ip_proto == IPPROTO_UDP)
843 		attr->outer_match_level = MLX5_MATCH_L4;
844 	else
845 		attr->outer_match_level = MLX5_MATCH_L3;
846 	attr->counter = entry->counter->counter;
847 	attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
848 	if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB)
849 		attr->esw_attr->in_mdev = priv->mdev;
850 
851 	mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
852 	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
853 
854 	zone_rule->rule = ct_priv->fs_ops->ct_rule_add(ct_priv->fs, spec, attr, flow_rule);
855 	if (IS_ERR(zone_rule->rule)) {
856 		err = PTR_ERR(zone_rule->rule);
857 		ct_dbg("Failed to add ct entry rule, nat: %d", nat);
858 		goto err_rule;
859 	}
860 
861 	zone_rule->attr = attr;
862 
863 	kvfree(spec);
864 	ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
865 
866 	return 0;
867 
868 err_rule:
869 	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, attr, zone_rule->mh);
870 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
871 err_mod_hdr:
872 	kfree(attr);
873 err_attr:
874 	kvfree(spec);
875 	return err;
876 }
877 
878 static int
879 mlx5_tc_ct_entry_update_rule(struct mlx5_tc_ct_priv *ct_priv,
880 			     struct flow_rule *flow_rule,
881 			     struct mlx5_ct_entry *entry,
882 			     bool nat, u8 zone_restore_id)
883 {
884 	struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
885 	struct mlx5_flow_attr *attr = zone_rule->attr, *old_attr;
886 	struct mlx5e_mod_hdr_handle *mh;
887 	struct mlx5_flow_spec *spec;
888 	int err;
889 
890 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
891 	if (!spec)
892 		return -ENOMEM;
893 
894 	old_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
895 	if (!old_attr) {
896 		err = -ENOMEM;
897 		goto err_attr;
898 	}
899 	*old_attr = *attr;
900 
901 	err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
902 					      nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
903 	if (err) {
904 		ct_dbg("Failed to create ct entry mod hdr, err: %d", err);
905 		goto err_mod_hdr;
906 	}
907 
908 	mlx5_tc_ct_set_tuple_match(ct_priv, spec, flow_rule);
909 	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
910 
911 	err = ct_priv->fs_ops->ct_rule_update(ct_priv->fs, zone_rule->rule, spec, attr);
912 	if (err) {
913 		ct_dbg("Failed to update ct entry rule, nat: %d, err: %d", nat, err);
914 		goto err_rule;
915 	}
916 
917 	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, old_attr, zone_rule->mh);
918 	zone_rule->mh = mh;
919 	mlx5_put_label_mapping(ct_priv, old_attr->ct_attr.ct_labels_id);
920 
921 	kfree(old_attr);
922 	kvfree(spec);
923 	ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
924 
925 	return 0;
926 
927 err_rule:
928 	mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
929 	mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
930 err_mod_hdr:
931 	*attr = *old_attr;
932 	kfree(old_attr);
933 err_attr:
934 	kvfree(spec);
935 	return err;
936 }
937 
938 static bool
939 mlx5_tc_ct_entry_valid(struct mlx5_ct_entry *entry)
940 {
941 	return test_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
942 }
943 
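/* Look up an entry by tuple, first in the plain tuples table and then in the
 * NAT tuples table, taking a reference on success. Returns ERR_PTR(-EINVAL)
 * if a matching entry exists but is not (yet) valid.
 */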
944 static struct mlx5_ct_entry *
945 mlx5_tc_ct_entry_get(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_tuple *tuple)
946 {
947 	struct mlx5_ct_entry *entry;
948 
949 	entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, tuple,
950 				       tuples_ht_params);
951 	if (entry && mlx5_tc_ct_entry_valid(entry) &&
952 	    refcount_inc_not_zero(&entry->refcnt)) {
953 		return entry;
954 	} else if (!entry) {
955 		entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_nat_ht,
956 					       tuple, tuples_nat_ht_params);
957 		if (entry && mlx5_tc_ct_entry_valid(entry) &&
958 		    refcount_inc_not_zero(&entry->refcnt))
959 			return entry;
960 	}
961 
962 	return entry ? ERR_PTR(-EINVAL) : NULL;
963 }
964 
965 static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
966 {
967 	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
968 
969 	if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
970 		rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
971 				       &entry->tuple_nat_node,
972 				       tuples_nat_ht_params);
973 	if (mlx5_tc_ct_entry_in_ct_table(entry))
974 		rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
975 				       tuples_ht_params);
976 }
977 
978 static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
979 {
980 	struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
981 
982 	mlx5_tc_ct_entry_del_rules(ct_priv, entry);
983 
984 	spin_lock_bh(&ct_priv->ht_lock);
985 	mlx5_tc_ct_entry_remove_from_tuples(entry);
986 	spin_unlock_bh(&ct_priv->ht_lock);
987 
988 	mlx5_tc_ct_counter_put(ct_priv, entry);
989 	kfree(entry);
990 }
991 
992 static void
993 mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
994 {
995 	if (!refcount_dec_and_test(&entry->refcnt))
996 		return;
997 
998 	mlx5_tc_ct_entry_del(entry);
999 }
1000 
1001 static void mlx5_tc_ct_entry_del_work(struct work_struct *work)
1002 {
1003 	struct mlx5_ct_entry *entry = container_of(work, struct mlx5_ct_entry, work);
1004 
1005 	mlx5_tc_ct_entry_del(entry);
1006 }
1007 
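/* Like mlx5_tc_ct_entry_put(), but defers the actual delete to the ct
 * workqueue; rule teardown sleeps (FW commands, mutexes), which is not
 * always allowed on the caller's path.
 */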
1008 static void
1009 __mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)
1010 {
1011 	if (!refcount_dec_and_test(&entry->refcnt))
1012 		return;
1013 
1014 	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work);
1015 	queue_work(entry->ct_priv->wq, &entry->work);
1016 }
1017 
1018 static struct mlx5_ct_counter *
1019 mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
1020 {
1021 	struct mlx5_ct_counter *counter;
1022 	int ret;
1023 
1024 	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
1025 	if (!counter)
1026 		return ERR_PTR(-ENOMEM);
1027 
1028 	counter->is_shared = false;
1029 	counter->counter = mlx5_fc_create(ct_priv->dev, true);
1030 	if (IS_ERR(counter->counter)) {
1031 		ct_dbg("Failed to create counter for ct entry");
1032 		ret = PTR_ERR(counter->counter);
1033 		kfree(counter);
1034 		return ERR_PTR(ret);
1035 	}
1036 
1037 	return counter;
1038 }
1039 
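/* Share one HW counter between both directions of a connection: build the
 * reversed tuple and, if the reverse-direction entry already exists, reuse
 * its counter; otherwise create a fresh shared counter.
 */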
1040 static struct mlx5_ct_counter *
1041 mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
1042 			      struct mlx5_ct_entry *entry)
1043 {
1044 	struct mlx5_ct_tuple rev_tuple = entry->tuple;
1045 	struct mlx5_ct_counter *shared_counter;
1046 	struct mlx5_ct_entry *rev_entry;
1047 
1048 	/* get the reversed tuple */
1049 	swap(rev_tuple.port.src, rev_tuple.port.dst);
1050 
1051 	if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1052 		__be32 tmp_addr = rev_tuple.ip.src_v4;
1053 
1054 		rev_tuple.ip.src_v4 = rev_tuple.ip.dst_v4;
1055 		rev_tuple.ip.dst_v4 = tmp_addr;
1056 	} else if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1057 		struct in6_addr tmp_addr = rev_tuple.ip.src_v6;
1058 
1059 		rev_tuple.ip.src_v6 = rev_tuple.ip.dst_v6;
1060 		rev_tuple.ip.dst_v6 = tmp_addr;
1061 	} else {
1062 		return ERR_PTR(-EOPNOTSUPP);
1063 	}
1064 
1065 	/* Use the same counter as the reverse direction */
1066 	spin_lock_bh(&ct_priv->ht_lock);
1067 	rev_entry = mlx5_tc_ct_entry_get(ct_priv, &rev_tuple);
1068 
1069 	if (IS_ERR(rev_entry)) {
1070 		spin_unlock_bh(&ct_priv->ht_lock);
1071 		goto create_counter;
1072 	}
1073 
1074 	if (rev_entry && refcount_inc_not_zero(&rev_entry->counter->refcount)) {
1075 		ct_dbg("Using shared counter entry=0x%p rev=0x%p", entry, rev_entry);
1076 		shared_counter = rev_entry->counter;
1077 		spin_unlock_bh(&ct_priv->ht_lock);
1078 
1079 		mlx5_tc_ct_entry_put(rev_entry);
1080 		return shared_counter;
1081 	}
1082 
1083 	spin_unlock_bh(&ct_priv->ht_lock);
1084 
1085 create_counter:
1086 
1087 	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
1088 	if (IS_ERR(shared_counter))
1089 		return shared_counter;
1090 
1091 	shared_counter->is_shared = true;
1092 	refcount_set(&shared_counter->refcount, 1);
1093 	return shared_counter;
1094 }
1095 
1096 static int
1097 mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
1098 			   struct flow_rule *flow_rule,
1099 			   struct mlx5_ct_entry *entry,
1100 			   u8 zone_restore_id)
1101 {
1102 	int err;
1103 
1104 	if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
1105 		entry->counter = mlx5_tc_ct_counter_create(ct_priv);
1106 	else
1107 		entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
1108 
1109 	if (IS_ERR(entry->counter)) {
1110 		err = PTR_ERR(entry->counter);
1111 		return err;
1112 	}
1113 
1114 	if (mlx5_tc_ct_entry_in_ct_table(entry)) {
1115 		err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
1116 						zone_restore_id);
1117 		if (err)
1118 			goto err_orig;
1119 	}
1120 
1121 	if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
1122 		err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
1123 						zone_restore_id);
1124 		if (err)
1125 			goto err_nat;
1126 	}
1127 
1128 	atomic_inc(&ct_priv->debugfs.stats.offloaded);
1129 	return 0;
1130 
1131 err_nat:
1132 	if (mlx5_tc_ct_entry_in_ct_table(entry))
1133 		mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
1134 err_orig:
1135 	mlx5_tc_ct_counter_put(ct_priv, entry);
1136 	return err;
1137 }
1138 
1139 static int
1140 mlx5_tc_ct_entry_update_rules(struct mlx5_tc_ct_priv *ct_priv,
1141 			      struct flow_rule *flow_rule,
1142 			      struct mlx5_ct_entry *entry,
1143 			      u8 zone_restore_id)
1144 {
1145 	int err = 0;
1146 
1147 	if (mlx5_tc_ct_entry_in_ct_table(entry)) {
1148 		err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, false,
1149 						   zone_restore_id);
1150 		if (err)
1151 			return err;
1152 	}
1153 
1154 	if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
1155 		err = mlx5_tc_ct_entry_update_rule(ct_priv, flow_rule, entry, true,
1156 						   zone_restore_id);
1157 		if (err && mlx5_tc_ct_entry_in_ct_table(entry))
1158 			mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
1159 	}
1160 	return err;
1161 }
1162 
1163 static int
1164 mlx5_tc_ct_block_flow_offload_update(struct mlx5_ct_ft *ft, struct flow_rule *flow_rule,
1165 				     struct mlx5_ct_entry *entry, unsigned long cookie)
1166 {
1167 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1168 	int err;
1169 
1170 	err = mlx5_tc_ct_entry_update_rules(ct_priv, flow_rule, entry, ft->zone_restore_id);
1171 	if (!err)
1172 		return 0;
1173 
1174 	/* If failed to update the entry, then look it up again under ht_lock
1175 	 * protection and properly delete it.
1176 	 */
1177 	spin_lock_bh(&ct_priv->ht_lock);
1178 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1179 	if (entry) {
1180 		rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
1181 		spin_unlock_bh(&ct_priv->ht_lock);
1182 		mlx5_tc_ct_entry_put(entry);
1183 	} else {
1184 		spin_unlock_bh(&ct_priv->ht_lock);
1185 	}
1186 	return err;
1187 }
1188 
1189 static int
1190 mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
1191 				  struct flow_cls_offload *flow)
1192 {
1193 	struct flow_rule *flow_rule = flow_cls_offload_flow_rule(flow);
1194 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1195 	struct flow_action_entry *meta_action;
1196 	unsigned long cookie = flow->cookie;
1197 	struct mlx5_ct_entry *entry;
1198 	int err;
1199 
1200 	meta_action = mlx5_tc_ct_get_ct_metadata_action(flow_rule);
1201 	if (!meta_action)
1202 		return -EOPNOTSUPP;
1203 
1204 	spin_lock_bh(&ct_priv->ht_lock);
1205 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1206 	if (entry && refcount_inc_not_zero(&entry->refcnt)) {
1207 		if (entry->restore_cookie == meta_action->ct_metadata.cookie) {
1208 			spin_unlock_bh(&ct_priv->ht_lock);
1209 			mlx5_tc_ct_entry_put(entry);
1210 			return -EEXIST;
1211 		}
1212 		entry->restore_cookie = meta_action->ct_metadata.cookie;
1213 		spin_unlock_bh(&ct_priv->ht_lock);
1214 
1215 		err = mlx5_tc_ct_block_flow_offload_update(ft, flow_rule, entry, cookie);
1216 		mlx5_tc_ct_entry_put(entry);
1217 		return err;
1218 	}
1219 	spin_unlock_bh(&ct_priv->ht_lock);
1220 
1221 	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1222 	if (!entry)
1223 		return -ENOMEM;
1224 
1225 	entry->tuple.zone = ft->zone;
1226 	entry->cookie = flow->cookie;
1227 	entry->restore_cookie = meta_action->ct_metadata.cookie;
1228 	refcount_set(&entry->refcnt, 2);
1229 	entry->ct_priv = ct_priv;
1230 
1231 	err = mlx5_tc_ct_rule_to_tuple(&entry->tuple, flow_rule);
1232 	if (err)
1233 		goto err_set;
1234 
1235 	memcpy(&entry->tuple_nat, &entry->tuple, sizeof(entry->tuple));
1236 	err = mlx5_tc_ct_rule_to_tuple_nat(&entry->tuple_nat, flow_rule);
1237 	if (err)
1238 		goto err_set;
1239 
1240 	spin_lock_bh(&ct_priv->ht_lock);
1241 
1242 	err = rhashtable_lookup_insert_fast(&ft->ct_entries_ht, &entry->node,
1243 					    cts_ht_params);
1244 	if (err)
1245 		goto err_entries;
1246 
1247 	if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
1248 		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
1249 						    &entry->tuple_nat_node,
1250 						    tuples_nat_ht_params);
1251 		if (err)
1252 			goto err_tuple_nat;
1253 
1254 		set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
1255 	}
1256 
1257 	if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
1258 		err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
1259 						    &entry->tuple_node,
1260 						    tuples_ht_params);
1261 		if (err)
1262 			goto err_tuple;
1263 
1264 		set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
1265 	}
1266 	spin_unlock_bh(&ct_priv->ht_lock);
1267 
1268 	err = mlx5_tc_ct_entry_add_rules(ct_priv, flow_rule, entry,
1269 					 ft->zone_restore_id);
1270 	if (err)
1271 		goto err_rules;
1272 
1273 	set_bit(MLX5_CT_ENTRY_FLAG_VALID, &entry->flags);
1274 	mlx5_tc_ct_entry_put(entry); /* this function reference */
1275 
1276 	return 0;
1277 
1278 err_rules:
1279 	spin_lock_bh(&ct_priv->ht_lock);
1280 err_tuple:
1281 	mlx5_tc_ct_entry_remove_from_tuples(entry);
1282 err_tuple_nat:
1283 	rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
1284 err_entries:
1285 	spin_unlock_bh(&ct_priv->ht_lock);
1286 err_set:
1287 	kfree(entry);
1288 	if (err != -EEXIST)
1289 		netdev_warn(ct_priv->netdev, "Failed to offload ct entry, err: %d\n", err);
1290 	return err;
1291 }
1292 
1293 static int
1294 mlx5_tc_ct_block_flow_offload_del(struct mlx5_ct_ft *ft,
1295 				  struct flow_cls_offload *flow)
1296 {
1297 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1298 	unsigned long cookie = flow->cookie;
1299 	struct mlx5_ct_entry *entry;
1300 
1301 	spin_lock_bh(&ct_priv->ht_lock);
1302 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1303 	if (!entry) {
1304 		spin_unlock_bh(&ct_priv->ht_lock);
1305 		return -ENOENT;
1306 	}
1307 
1308 	if (!mlx5_tc_ct_entry_valid(entry)) {
1309 		spin_unlock_bh(&ct_priv->ht_lock);
1310 		return -EINVAL;
1311 	}
1312 
1313 	rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
1314 	spin_unlock_bh(&ct_priv->ht_lock);
1315 
1316 	mlx5_tc_ct_entry_put(entry);
1317 
1318 	return 0;
1319 }
1320 
1321 static int
1322 mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
1323 				    struct flow_cls_offload *f)
1324 {
1325 	struct mlx5_tc_ct_priv *ct_priv = ft->ct_priv;
1326 	unsigned long cookie = f->cookie;
1327 	struct mlx5_ct_entry *entry;
1328 	u64 lastuse, packets, bytes;
1329 
1330 	spin_lock_bh(&ct_priv->ht_lock);
1331 	entry = rhashtable_lookup_fast(&ft->ct_entries_ht, &cookie, cts_ht_params);
1332 	if (!entry) {
1333 		spin_unlock_bh(&ct_priv->ht_lock);
1334 		return -ENOENT;
1335 	}
1336 
1337 	if (!mlx5_tc_ct_entry_valid(entry) || !refcount_inc_not_zero(&entry->refcnt)) {
1338 		spin_unlock_bh(&ct_priv->ht_lock);
1339 		return -EINVAL;
1340 	}
1341 
1342 	spin_unlock_bh(&ct_priv->ht_lock);
1343 
1344 	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
1345 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
1346 			  FLOW_ACTION_HW_STATS_DELAYED);
1347 
1348 	mlx5_tc_ct_entry_put(entry);
1349 	return 0;
1350 }
1351 
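/* flow_block callback for the zone's nf_flowtable: dispatches
 * FLOW_CLS_REPLACE/DESTROY/STATS commands for conntrack entries to the
 * handlers above.
 */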
1352 static int
1353 mlx5_tc_ct_block_flow_offload(enum tc_setup_type type, void *type_data,
1354 			      void *cb_priv)
1355 {
1356 	struct flow_cls_offload *f = type_data;
1357 	struct mlx5_ct_ft *ft = cb_priv;
1358 
1359 	if (type != TC_SETUP_CLSFLOWER)
1360 		return -EOPNOTSUPP;
1361 
1362 	switch (f->command) {
1363 	case FLOW_CLS_REPLACE:
1364 		return mlx5_tc_ct_block_flow_offload_add(ft, f);
1365 	case FLOW_CLS_DESTROY:
1366 		return mlx5_tc_ct_block_flow_offload_del(ft, f);
1367 	case FLOW_CLS_STATS:
1368 		return mlx5_tc_ct_block_flow_offload_stats(ft, f);
1369 	default:
1370 		break;
1371 	}
1372 
1373 	return -EOPNOTSUPP;
1374 }
1375 
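/* Recover a ct tuple from an skb using the flow dissector; only TCP, UDP and
 * GRE over IPv4/IPv6 can be expressed, anything else returns false.
 */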
1376 static bool
1377 mlx5_tc_ct_skb_to_tuple(struct sk_buff *skb, struct mlx5_ct_tuple *tuple,
1378 			u16 zone)
1379 {
1380 	struct flow_keys flow_keys;
1381 
1382 	skb_reset_network_header(skb);
1383 	skb_flow_dissect_flow_keys(skb, &flow_keys, FLOW_DISSECTOR_F_STOP_BEFORE_ENCAP);
1384 
1385 	tuple->zone = zone;
1386 
1387 	if (flow_keys.basic.ip_proto != IPPROTO_TCP &&
1388 	    flow_keys.basic.ip_proto != IPPROTO_UDP &&
1389 	    flow_keys.basic.ip_proto != IPPROTO_GRE)
1390 		return false;
1391 
1392 	if (flow_keys.basic.ip_proto == IPPROTO_TCP ||
1393 	    flow_keys.basic.ip_proto == IPPROTO_UDP) {
1394 		tuple->port.src = flow_keys.ports.src;
1395 		tuple->port.dst = flow_keys.ports.dst;
1396 	}
1397 	tuple->n_proto = flow_keys.basic.n_proto;
1398 	tuple->ip_proto = flow_keys.basic.ip_proto;
1399 
1400 	switch (flow_keys.basic.n_proto) {
1401 	case htons(ETH_P_IP):
1402 		tuple->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1403 		tuple->ip.src_v4 = flow_keys.addrs.v4addrs.src;
1404 		tuple->ip.dst_v4 = flow_keys.addrs.v4addrs.dst;
1405 		break;
1406 
1407 	case htons(ETH_P_IPV6):
1408 		tuple->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1409 		tuple->ip.src_v6 = flow_keys.addrs.v6addrs.src;
1410 		tuple->ip.dst_v6 = flow_keys.addrs.v6addrs.dst;
1411 		break;
1412 	default:
1413 		goto out;
1414 	}
1415 
1416 	return true;
1417 
1418 out:
1419 	return false;
1420 }
1421 
1422 int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
1423 {
1424 	u32 ctstate = 0, ctstate_mask = 0;
1425 
1426 	mlx5e_tc_match_to_reg_get_match(spec, CTSTATE_TO_REG,
1427 					&ctstate, &ctstate_mask);
1428 
1429 	if ((ctstate & ctstate_mask) == MLX5_CT_STATE_TRK_BIT)
1430 		return -EOPNOTSUPP;
1431 
1432 	ctstate_mask |= MLX5_CT_STATE_TRK_BIT;
1433 	mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
1434 				    ctstate, ctstate_mask);
1435 
1436 	return 0;
1437 }
1438 
1439 void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr)
1440 {
1441 	if (!priv || !ct_attr->ct_labels_id)
1442 		return;
1443 
1444 	mlx5_put_label_mapping(priv, ct_attr->ct_labels_id);
1445 }
1446 
1447 int
1448 mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
1449 		     struct mlx5_flow_spec *spec,
1450 		     struct flow_cls_offload *f,
1451 		     struct mlx5_ct_attr *ct_attr,
1452 		     struct netlink_ext_ack *extack)
1453 {
1454 	bool trk, est, untrk, unnew, unest, new, rpl, unrpl, rel, unrel, inv, uninv;
1455 	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
1456 	struct flow_dissector_key_ct *mask, *key;
1457 	u32 ctstate = 0, ctstate_mask = 0;
1458 	u16 ct_state_on, ct_state_off;
1459 	u16 ct_state, ct_state_mask;
1460 	struct flow_match_ct match;
1461 	u32 ct_labels[4];
1462 
1463 	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
1464 		return 0;
1465 
1466 	if (!priv) {
1467 		NL_SET_ERR_MSG_MOD(extack,
1468 				   "offload of ct matching isn't available");
1469 		return -EOPNOTSUPP;
1470 	}
1471 
1472 	flow_rule_match_ct(rule, &match);
1473 
1474 	key = match.key;
1475 	mask = match.mask;
1476 
1477 	ct_state = key->ct_state;
1478 	ct_state_mask = mask->ct_state;
1479 
1480 	if (ct_state_mask & ~(TCA_FLOWER_KEY_CT_FLAGS_TRACKED |
1481 			      TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED |
1482 			      TCA_FLOWER_KEY_CT_FLAGS_NEW |
1483 			      TCA_FLOWER_KEY_CT_FLAGS_REPLY |
1484 			      TCA_FLOWER_KEY_CT_FLAGS_RELATED |
1485 			      TCA_FLOWER_KEY_CT_FLAGS_INVALID)) {
1486 		NL_SET_ERR_MSG_MOD(extack,
1487 				   "only ct_state trk, est, new and rpl are supported for offload");
1488 		return -EOPNOTSUPP;
1489 	}
1490 
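	/* Split the ct_state match into bits that must be set (on) and bits
	 * that must be clear (off); each maps to a value/mask pair in the
	 * CTSTATE register below.
	 */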
1491 	ct_state_on = ct_state & ct_state_mask;
1492 	ct_state_off = (ct_state & ct_state_mask) ^ ct_state_mask;
1493 	trk = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
1494 	new = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_NEW;
1495 	est = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
1496 	rpl = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
1497 	rel = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
1498 	inv = ct_state_on & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
1499 	untrk = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_TRACKED;
1500 	unnew = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_NEW;
1501 	unest = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_ESTABLISHED;
1502 	unrpl = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_REPLY;
1503 	unrel = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_RELATED;
1504 	uninv = ct_state_off & TCA_FLOWER_KEY_CT_FLAGS_INVALID;
1505 
1506 	ctstate |= trk ? MLX5_CT_STATE_TRK_BIT : 0;
1507 	ctstate |= new ? MLX5_CT_STATE_NEW_BIT : 0;
1508 	ctstate |= est ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
1509 	ctstate |= rpl ? MLX5_CT_STATE_REPLY_BIT : 0;
1510 	ctstate_mask |= (untrk || trk) ? MLX5_CT_STATE_TRK_BIT : 0;
1511 	ctstate_mask |= (unnew || new) ? MLX5_CT_STATE_NEW_BIT : 0;
1512 	ctstate_mask |= (unest || est) ? MLX5_CT_STATE_ESTABLISHED_BIT : 0;
1513 	ctstate_mask |= (unrpl || rpl) ? MLX5_CT_STATE_REPLY_BIT : 0;
1514 	ctstate_mask |= unrel ? MLX5_CT_STATE_RELATED_BIT : 0;
1515 	ctstate_mask |= uninv ? MLX5_CT_STATE_INVALID_BIT : 0;
1516 
1517 	if (rel) {
1518 		NL_SET_ERR_MSG_MOD(extack,
1519 				   "matching on ct_state +rel isn't supported");
1520 		return -EOPNOTSUPP;
1521 	}
1522 
1523 	if (inv) {
1524 		NL_SET_ERR_MSG_MOD(extack,
1525 				   "matching on ct_state +inv isn't supported");
1526 		return -EOPNOTSUPP;
1527 	}
1528 
1529 	if (mask->ct_zone)
1530 		mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
1531 					    key->ct_zone, MLX5_CT_ZONE_MASK);
1532 	if (ctstate_mask)
1533 		mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG,
1534 					    ctstate, ctstate_mask);
1535 	if (mask->ct_mark)
1536 		mlx5e_tc_match_to_reg_match(spec, MARK_TO_REG,
1537 					    key->ct_mark, mask->ct_mark);
1538 	if (mask->ct_labels[0] || mask->ct_labels[1] || mask->ct_labels[2] ||
1539 	    mask->ct_labels[3]) {
1540 		ct_labels[0] = key->ct_labels[0] & mask->ct_labels[0];
1541 		ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
1542 		ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
1543 		ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
1544 		if (mlx5_get_label_mapping(priv, ct_labels, &ct_attr->ct_labels_id))
1545 			return -EOPNOTSUPP;
1546 		mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
1547 					    MLX5_CT_LABELS_MASK);
1548 	}
1549 
1550 	return 0;
1551 }
1552 
1553 int
1554 mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
1555 			struct mlx5_flow_attr *attr,
1556 			const struct flow_action_entry *act,
1557 			struct netlink_ext_ack *extack)
1558 {
1559 	if (!priv) {
1560 		NL_SET_ERR_MSG_MOD(extack,
1561 				   "offload of ct action isn't available");
1562 		return -EOPNOTSUPP;
1563 	}
1564 
1565 	attr->ct_attr.ct_action |= act->ct.action; /* So we can have clear + ct */
1566 	attr->ct_attr.zone = act->ct.zone;
1567 	if (!(act->ct.action & TCA_CT_ACT_CLEAR))
1568 		attr->ct_attr.nf_ft = act->ct.flow_table;
1569 	attr->ct_attr.act_miss_cookie = act->miss_cookie;
1570 
1571 	return 0;
1572 }
1573 
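/* Populate a per-zone pre_ct table with two rules: send already-tracked
 * (and, for the NAT flavour, NATed) traffic of this zone straight to
 * post_act, and miss everything else into the ct (or ct_nat) table.
 */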
1574 static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
1575 				  struct mlx5_tc_ct_pre *pre_ct,
1576 				  bool nat)
1577 {
1578 	struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
1579 	struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
1580 	struct mlx5_core_dev *dev = ct_priv->dev;
1581 	struct mlx5_flow_table *ft = pre_ct->ft;
1582 	struct mlx5_flow_destination dest = {};
1583 	struct mlx5_flow_act flow_act = {};
1584 	struct mlx5_modify_hdr *mod_hdr;
1585 	struct mlx5_flow_handle *rule;
1586 	struct mlx5_flow_spec *spec;
1587 	u32 ctstate;
1588 	u16 zone;
1589 	int err;
1590 
1591 	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
1592 	if (!spec)
1593 		return -ENOMEM;
1594 
1595 	zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
1596 	err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ct_priv->ns_type,
1597 					ZONE_TO_REG, zone);
1598 	if (err) {
1599 		ct_dbg("Failed to set zone register mapping");
1600 		goto err_mapping;
1601 	}
1602 
1603 	mod_hdr = mlx5_modify_header_alloc(dev, ct_priv->ns_type,
1604 					   pre_mod_acts.num_actions,
1605 					   pre_mod_acts.actions);
1606 
1607 	if (IS_ERR(mod_hdr)) {
1608 		err = PTR_ERR(mod_hdr);
1609 		ct_dbg("Failed to create pre ct mod hdr");
1610 		goto err_mapping;
1611 	}
1612 	pre_ct->modify_hdr = mod_hdr;
1613 
1614 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1615 			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1616 	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
1617 	flow_act.modify_hdr = mod_hdr;
1618 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
1619 
1620 	/* add flow rule */
1621 	mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG,
1622 				    zone, MLX5_CT_ZONE_MASK);
1623 	ctstate = MLX5_CT_STATE_TRK_BIT;
1624 	if (nat)
1625 		ctstate |= MLX5_CT_STATE_NAT_BIT;
1626 	mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
1627 
1628 	dest.ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
1629 	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
1630 	if (IS_ERR(rule)) {
1631 		err = PTR_ERR(rule);
1632 		ct_dbg("Failed to add pre ct flow rule zone %d", zone);
1633 		goto err_flow_rule;
1634 	}
1635 	pre_ct->flow_rule = rule;
1636 
1637 	/* add miss rule */
1638 	dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
1639 	rule = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
1640 	if (IS_ERR(rule)) {
1641 		err = PTR_ERR(rule);
1642 		ct_dbg("Failed to add pre ct miss rule zone %d", zone);
1643 		goto err_miss_rule;
1644 	}
1645 	pre_ct->miss_rule = rule;
1646 
1647 	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
1648 	kvfree(spec);
1649 	return 0;
1650 
1651 err_miss_rule:
1652 	mlx5_del_flow_rules(pre_ct->flow_rule);
1653 err_flow_rule:
1654 	mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
1655 err_mapping:
1656 	mlx5e_mod_hdr_dealloc(&pre_mod_acts);
1657 	kvfree(spec);
1658 	return err;
1659 }
1660 
1661 static void
1662 tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
1663 		       struct mlx5_tc_ct_pre *pre_ct)
1664 {
1665 	struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
1666 	struct mlx5_core_dev *dev = ct_priv->dev;
1667 
1668 	mlx5_del_flow_rules(pre_ct->flow_rule);
1669 	mlx5_del_flow_rules(pre_ct->miss_rule);
1670 	mlx5_modify_header_dealloc(dev, pre_ct->modify_hdr);
1671 }
1672 
1673 static int
1674 mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
1675 			struct mlx5_tc_ct_pre *pre_ct,
1676 			bool nat)
1677 {
1678 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
1679 	struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
1680 	struct mlx5_core_dev *dev = ct_priv->dev;
1681 	struct mlx5_flow_table_attr ft_attr = {};
1682 	struct mlx5_flow_namespace *ns;
1683 	struct mlx5_flow_table *ft;
1684 	struct mlx5_flow_group *g;
1685 	u32 metadata_reg_c_2_mask;
1686 	u32 *flow_group_in;
1687 	void *misc;
1688 	int err;
1689 
1690 	ns = mlx5_get_flow_namespace(dev, ct_priv->ns_type);
1691 	if (!ns) {
1692 		err = -EOPNOTSUPP;
1693 		ct_dbg("Failed to get flow namespace");
1694 		return err;
1695 	}
1696 
1697 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
1698 	if (!flow_group_in)
1699 		return -ENOMEM;
1700 
1701 	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
1702 	ft_attr.prio = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB ?
1703 			FDB_TC_OFFLOAD : MLX5E_TC_PRIO;
1704 	ft_attr.max_fte = 2;
1705 	ft_attr.level = 1;
1706 	ft = mlx5_create_flow_table(ns, &ft_attr);
1707 	if (IS_ERR(ft)) {
1708 		err = PTR_ERR(ft);
1709 		ct_dbg("Failed to create pre ct table");
1710 		goto out_free;
1711 	}
1712 	pre_ct->ft = ft;
1713 
1714 	/* create flow group */
1715 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
1716 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
1717 	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
1718 		 MLX5_MATCH_MISC_PARAMETERS_2);
1719 
1720 	misc = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
1721 			    match_criteria.misc_parameters_2);
1722 
1723 	metadata_reg_c_2_mask = MLX5_CT_ZONE_MASK;
1724 	metadata_reg_c_2_mask |= (MLX5_CT_STATE_TRK_BIT << 16);
1725 	if (nat)
1726 		metadata_reg_c_2_mask |= (MLX5_CT_STATE_NAT_BIT << 16);
1727 
1728 	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_2,
1729 		 metadata_reg_c_2_mask);
1730 
1731 	g = mlx5_create_flow_group(ft, flow_group_in);
1732 	if (IS_ERR(g)) {
1733 		err = PTR_ERR(g);
1734 		ct_dbg("Failed to create pre ct group");
1735 		goto err_flow_grp;
1736 	}
1737 	pre_ct->flow_grp = g;
1738 
1739 	/* create miss group */
1740 	memset(flow_group_in, 0, inlen);
1741 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
1742 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
1743 	g = mlx5_create_flow_group(ft, flow_group_in);
1744 	if (IS_ERR(g)) {
1745 		err = PTR_ERR(g);
1746 		ct_dbg("Failed to create pre ct miss group");
1747 		goto err_miss_grp;
1748 	}
1749 	pre_ct->miss_grp = g;
1750 
1751 	err = tc_ct_pre_ct_add_rules(ct_ft, pre_ct, nat);
1752 	if (err)
1753 		goto err_add_rules;
1754 
1755 	kvfree(flow_group_in);
1756 	return 0;
1757 
1758 err_add_rules:
1759 	mlx5_destroy_flow_group(pre_ct->miss_grp);
1760 err_miss_grp:
1761 	mlx5_destroy_flow_group(pre_ct->flow_grp);
1762 err_flow_grp:
1763 	mlx5_destroy_flow_table(ft);
1764 out_free:
1765 	kvfree(flow_group_in);
1766 	return err;
1767 }
1768 
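/* For reference, the pre_ct flow group above keys on reg_c_2 with the zone
 * packed in bits 0..15 and the ctstate bits shifted into bits 16..31. A
 * sketch (hypothetical helper, mirroring the mask built in
 * mlx5_tc_ct_alloc_pre_ct()):
 *
 *	static u32 pre_ct_reg_c_2_mask(bool nat)
 *	{
 *		u32 mask = MLX5_CT_ZONE_MASK;
 *
 *		mask |= MLX5_CT_STATE_TRK_BIT << 16;
 *		if (nat)
 *			mask |= MLX5_CT_STATE_NAT_BIT << 16;
 *		return mask;
 *	}
 */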
1769 static void
1770 mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
1771 		       struct mlx5_tc_ct_pre *pre_ct)
1772 {
1773 	tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
1774 	mlx5_destroy_flow_group(pre_ct->miss_grp);
1775 	mlx5_destroy_flow_group(pre_ct->flow_grp);
1776 	mlx5_destroy_flow_table(pre_ct->ft);
1777 }
1778 
1779 static int
1780 mlx5_tc_ct_alloc_pre_ct_tables(struct mlx5_ct_ft *ft)
1781 {
1782 	int err;
1783 
1784 	err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct, false);
1785 	if (err)
1786 		return err;
1787 
1788 	err = mlx5_tc_ct_alloc_pre_ct(ft, &ft->pre_ct_nat, true);
1789 	if (err)
1790 		goto err_pre_ct_nat;
1791 
1792 	return 0;
1793 
1794 err_pre_ct_nat:
1795 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
1796 	return err;
1797 }
1798 
1799 static void
1800 mlx5_tc_ct_free_pre_ct_tables(struct mlx5_ct_ft *ft)
1801 {
1802 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct_nat);
1803 	mlx5_tc_ct_free_pre_ct(ft, &ft->pre_ct);
1804 }
1805 
1806 /* To avoid a false lock dependency warning, set the ct_entries_ht lock
1807  * class to be different from the lock class of the rhashtable used by
1808  * fs_core: when deleting the last flow from a group and then the group
1809  * itself, del_sw_flow_group() calls rhashtable_destroy() on fg->ftes_hash,
1810  * taking that table's ht->mutex, which is different from the ht->mutex here.
1811  */
1812 static struct lock_class_key ct_entries_ht_lock_key;
1813 
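/* The generic shape of this pattern, as used in mlx5_tc_ct_add_ft_cb()
 * below (a sketch, with a hypothetical table and key):
 *
 *	static struct lock_class_key my_ht_lock_key;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *	lockdep_set_class(&ht.mutex, &my_ht_lock_key);
 */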
1814 static struct mlx5_ct_ft *
1815 mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
1816 		     struct nf_flowtable *nf_ft)
1817 {
1818 	struct mlx5_ct_ft *ft;
1819 	int err;
1820 
1821 	ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params);
1822 	if (ft) {
1823 		refcount_inc(&ft->refcount);
1824 		return ft;
1825 	}
1826 
1827 	ft = kzalloc(sizeof(*ft), GFP_KERNEL);
1828 	if (!ft)
1829 		return ERR_PTR(-ENOMEM);
1830 
1831 	err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id);
1832 	if (err)
1833 		goto err_mapping;
1834 
1835 	ft->zone = zone;
1836 	ft->nf_ft = nf_ft;
1837 	ft->ct_priv = ct_priv;
1838 	refcount_set(&ft->refcount, 1);
1839 
1840 	err = mlx5_tc_ct_alloc_pre_ct_tables(ft);
1841 	if (err)
1842 		goto err_alloc_pre_ct;
1843 
1844 	err = rhashtable_init(&ft->ct_entries_ht, &cts_ht_params);
1845 	if (err)
1846 		goto err_init;
1847 
1848 	lockdep_set_class(&ft->ct_entries_ht.mutex, &ct_entries_ht_lock_key);
1849 
1850 	err = rhashtable_insert_fast(&ct_priv->zone_ht, &ft->node,
1851 				     zone_params);
1852 	if (err)
1853 		goto err_insert;
1854 
1855 	err = nf_flow_table_offload_add_cb(ft->nf_ft,
1856 					   mlx5_tc_ct_block_flow_offload, ft);
1857 	if (err)
1858 		goto err_add_cb;
1859 
1860 	return ft;
1861 
1862 err_add_cb:
1863 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
1864 err_insert:
1865 	rhashtable_destroy(&ft->ct_entries_ht);
1866 err_init:
1867 	mlx5_tc_ct_free_pre_ct_tables(ft);
1868 err_alloc_pre_ct:
1869 	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
1870 err_mapping:
1871 	kfree(ft);
1872 	return ERR_PTR(err);
1873 }
1874 
1875 static void
1876 mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
1877 {
1878 	struct mlx5_ct_entry *entry = ptr;
1879 
1880 	mlx5_tc_ct_entry_put(entry);
1881 }
1882 
1883 static void
1884 mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
1885 {
1886 	if (!refcount_dec_and_test(&ft->refcount))
1887 		return;
1888 
1889 	flush_workqueue(ct_priv->wq);
1890 	nf_flow_table_offload_del_cb(ft->nf_ft,
1891 				     mlx5_tc_ct_block_flow_offload, ft);
1892 	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);
1893 	rhashtable_free_and_destroy(&ft->ct_entries_ht,
1894 				    mlx5_tc_ct_flush_ft_entry,
1895 				    ct_priv);
1896 	mlx5_tc_ct_free_pre_ct_tables(ft);
1897 	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);
1898 	kfree(ft);
1899 }
1900 
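/* mlx5_tc_ct_add_ft_cb() and mlx5_tc_ct_del_ft_cb() form a refcounted
 * get/put pair keyed by zone; the expected usage (a sketch mirroring
 * __mlx5_tc_ct_flow_offload() and __mlx5_tc_ct_delete_flow() below):
 *
 *	ft = mlx5_tc_ct_add_ft_cb(ct_priv, zone, nf_ft);
 *	if (IS_ERR(ft))
 *		return PTR_ERR(ft);
 *	...
 *	mlx5_tc_ct_del_ft_cb(ct_priv, ft);
 */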
1901 /* We translate the tc filter with CT action to the following HW model:
1902  *
1903  *	+-----------------------+
1904  *	+ rule (either original +
1905  *	+ or post_act rule)     +
1906  *	+-----------------------+
1907  *		 | set act_miss_cookie mapping
1908  *		 | set fte_id
1909  *		 | set tunnel_id
1910  *		 | rest of actions before the CT action (for this orig/post_act rule)
1911  *		 |
1912  * +-------------+
1913  * | Chain 0	 |
1914  * | optimization|
1915  * |		 v
1916  * |	+---------------------+
1917  * |	+ pre_ct/pre_ct_nat   +  if matches     +----------------------+
1918  * |	+ zone+nat match      +---------------->+ post_act (see below) +
1919  * |	+---------------------+  set zone       +----------------------+
1920  * |		 |
1921  * +-------------+ set zone
1922  *		 |
1923  *		 v
1924  *	+--------------------+
1925  *	+ CT (nat or no nat) +
1926  *	+ tuple + zone match +
1927  *	+--------------------+
1928  *		 | set mark
1929  *		 | set labels_id
1930  *		 | set established
1931  *		 | set zone_restore
1932  *		 | do nat (if needed)
1933  *		 v
1934  *	+--------------+
1935  *	+ post_act     + rest of parsed filter's actions
1936  *	+ fte_id match +------------------------>
1937  *	+--------------+
1938  *
1939  */
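/* As a concrete example (assumed tc usage, not mandated by this file), a
 * filter exercising this model could look like:
 *
 *	tc filter add dev $DEV ingress chain 0 prio 1 protocol ip flower \
 *		ct_state -trk \
 *		action ct zone 5 pipe \
 *		action goto chain 1
 *
 * Since the rule is in chain 0, the optimized path applies: the zone is
 * set inline and the packet jumps straight to the CT table, skipping the
 * pre_ct table.
 */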
1940 static int
1941 __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
1942 			  struct mlx5_flow_attr *attr)
1943 {
1944 	bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
1945 	struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
1946 	int act_miss_mapping = 0, err;
1947 	struct mlx5_ct_ft *ft;
1948 	u16 zone;
1949 
1950 	/* Register for CT established events */
1951 	ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
1952 				  attr->ct_attr.nf_ft);
1953 	if (IS_ERR(ft)) {
1954 		err = PTR_ERR(ft);
1955 		ct_dbg("Failed to register to ft callback");
1956 		goto err_ft;
1957 	}
1958 	attr->ct_attr.ft = ft;
1959 
1960 	err = mlx5e_tc_action_miss_mapping_get(ct_priv->priv, attr, attr->ct_attr.act_miss_cookie,
1961 					       &act_miss_mapping);
1962 	if (err) {
1963 		ct_dbg("Failed to get register mapping for act miss");
1964 		goto err_get_act_miss;
1965 	}
1966 
1967 	err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
1968 					ct_priv->ns_type, MAPPED_OBJ_TO_REG, act_miss_mapping);
1969 	if (err) {
1970 		ct_dbg("Failed to set act miss register mapping");
1971 		goto err_mapping;
1972 	}
1973 
1974 	/* Chain 0 sets the zone and jumps to the ct table.
1975 	 * Other chains jump to the pre_ct table to align with act_ct cached logic.
1976 	 */
1977 	if (!attr->chain) {
1978 		zone = ft->zone & MLX5_CT_ZONE_MASK;
1979 		err = mlx5e_tc_match_to_reg_set(priv->mdev, &attr->parse_attr->mod_hdr_acts,
1980 						ct_priv->ns_type, ZONE_TO_REG, zone);
1981 		if (err) {
1982 			ct_dbg("Failed to set zone register mapping");
1983 			goto err_mapping;
1984 		}
1985 
1986 		attr->dest_ft = nat ? ct_priv->ct_nat : ct_priv->ct;
1987 	} else {
1988 		attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
1989 	}
1990 
1991 	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1992 	attr->ct_attr.act_miss_mapping = act_miss_mapping;
1993 
1994 	return 0;
1995 
1996 err_mapping:
1997 	mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, act_miss_mapping);
1998 err_get_act_miss:
1999 	mlx5_tc_ct_del_ft_cb(ct_priv, ft);
2000 err_ft:
2001 	netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err);
2002 	return err;
2003 }
2004 
2005 int
2006 mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *attr)
2007 {
2008 	int err;
2009 
2010 	if (!priv)
2011 		return -EOPNOTSUPP;
2012 
2013 	if (attr->ct_attr.offloaded)
2014 		return 0;
2015 
2016 	if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
2017 		err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
2018 						     0, 0, 0, 0);
2019 		if (err)
2020 			return err;
2021 
2022 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2023 	}
2024 
2025 	if (!attr->ct_attr.nf_ft) { /* only a ct clear action, not a ct_clear,ct() combination */
2026 		attr->ct_attr.offloaded = true;
2027 		return 0;
2028 	}
2029 
2030 	mutex_lock(&priv->control_lock);
2031 	err = __mlx5_tc_ct_flow_offload(priv, attr);
2032 	if (!err)
2033 		attr->ct_attr.offloaded = true;
2034 	mutex_unlock(&priv->control_lock);
2035 
2036 	return err;
2037 }
2038 
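/* For a standalone "ct clear" (no nf_ft attached), the function above only
 * adds a mod header zeroing the CT registers; e.g. (assumed tc usage):
 *
 *	tc filter add dev $DEV ingress prio 1 protocol ip flower \
 *		action ct clear pipe \
 *		action mirred egress redirect dev $DEV2
 */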
2039 static void
2040 __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
2041 			 struct mlx5_flow_attr *attr)
2042 {
2043 	mlx5e_tc_action_miss_mapping_put(ct_priv->priv, attr, attr->ct_attr.act_miss_mapping);
2044 	mlx5_tc_ct_del_ft_cb(ct_priv, attr->ct_attr.ft);
2045 }
2046 
2047 void
2048 mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
2049 		       struct mlx5_flow_attr *attr)
2050 {
2051 	if (!attr->ct_attr.offloaded) /* no ct action, return */
2052 		return;
2053 	if (!attr->ct_attr.nf_ft) /* only a ct clear action, not a ct_clear,ct() combination */
2054 		return;
2055 
2056 	mutex_lock(&priv->control_lock);
2057 	__mlx5_tc_ct_delete_flow(priv, attr);
2058 	mutex_unlock(&priv->control_lock);
2059 }
2060 
2061 static int
2062 mlx5_tc_ct_fs_init(struct mlx5_tc_ct_priv *ct_priv)
2063 {
2064 	struct mlx5_flow_table *post_ct = mlx5e_tc_post_act_get_ft(ct_priv->post_act);
2065 	struct mlx5_ct_fs_ops *fs_ops = mlx5_ct_fs_dmfs_ops_get();
2066 	int err;
2067 
2068 	if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB &&
2069 	    ct_priv->dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) {
2070 		ct_dbg("Using SMFS ct flow steering provider");
2071 		fs_ops = mlx5_ct_fs_smfs_ops_get();
2072 	}
2073 
2074 	ct_priv->fs = kzalloc(sizeof(*ct_priv->fs) + fs_ops->priv_size, GFP_KERNEL);
2075 	if (!ct_priv->fs)
2076 		return -ENOMEM;
2077 
2078 	ct_priv->fs->netdev = ct_priv->netdev;
2079 	ct_priv->fs->dev = ct_priv->dev;
2080 	ct_priv->fs_ops = fs_ops;
2081 
2082 	err = ct_priv->fs_ops->init(ct_priv->fs, ct_priv->ct, ct_priv->ct_nat, post_ct);
2083 	if (err)
2084 		goto err_init;
2085 
2086 	return 0;
2087 
2088 err_init:
2089 	kfree(ct_priv->fs);
2090 	return err;
2091 }
2092 
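/* Each steering provider (dmfs or smfs) gets fs_ops->priv_size bytes of
 * private state co-allocated after struct mlx5_ct_fs; a sketch of how a
 * provider could reach it (hypothetical accessor, the real one is declared
 * in en/tc/ct_fs.h):
 *
 *	static void *ct_fs_priv(struct mlx5_ct_fs *fs)
 *	{
 *		return fs + 1;
 *	}
 */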
2093 static int
2094 mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
2095 				  const char **err_msg)
2096 {
2097 	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) {
2098 		/* The vlan workaround must be avoided for multi-chain rules.
2099 		 * This is just a sanity check, as the pop vlan action should
2100 		 * be supported by any FW that supports ignore_flow_level.
2101 		 */
2102 
2103 		*err_msg = "firmware vlan actions support is missing";
2104 		return -EOPNOTSUPP;
2105 	}
2106 
2107 	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev,
2108 				    fdb_modify_header_fwd_to_table)) {
2109 		/* CT always writes to registers, which requires mod header actions.
2110 		 * Therefore, mod header followed by a goto (fwd to table) is required.
2111 		 */
2112 
2113 		*err_msg = "firmware fwd and modify support is missing";
2114 		return -EOPNOTSUPP;
2115 	}
2116 
2117 	if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2118 		*err_msg = "register loopback isn't supported";
2119 		return -EOPNOTSUPP;
2120 	}
2121 
2122 	return 0;
2123 }
2124 
2125 static int
2126 mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
2127 			      enum mlx5_flow_namespace_type ns_type,
2128 			      struct mlx5e_post_act *post_act)
2129 {
2130 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2131 	const char *err_msg = NULL;
2132 	int err = 0;
2133 
2134 	if (IS_ERR_OR_NULL(post_act)) {
2135 		/* ignore_flow_level isn't supported by default for VFs, so post_act
2136 		 * won't be available. Skip showing the error msg.
2137 		 */
2138 		if (priv->mdev->coredev_type == MLX5_COREDEV_PF)
2139 			err_msg = "post action is missing";
2140 		err = -EOPNOTSUPP;
2141 		goto out_err;
2142 	}
2143 
2144 	if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
2145 		err = mlx5_tc_ct_init_check_esw_support(esw, &err_msg);
2146 
2147 out_err:
2148 	if (err && err_msg)
2149 		netdev_dbg(priv->netdev, "tc ct offload not supported, %s\n", err_msg);
2150 	return err;
2151 }
2152 
2153 static void
2154 mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
2155 {
2156 	struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
2157 
2158 	ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev));
2159 	debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
2160 				&ct_dbgfs->stats.offloaded);
2161 	debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
2162 				&ct_dbgfs->stats.rx_dropped);
2163 }
2164 
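/* The counters can then be read from the device's debugfs directory, e.g.
 * (assumed path layout under the mlx5 debugfs root):
 *
 *	# cat /sys/kernel/debug/mlx5/<dev>/ct/offloaded
 *	# cat /sys/kernel/debug/mlx5/<dev>/ct/rx_dropped
 */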
2165 static void
2166 mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
2167 {
2168 	debugfs_remove_recursive(ct_priv->debugfs.root);
2169 }
2170 
2171 static struct mlx5_flow_handle *
2172 tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
2173 		    struct mlx5_flow_table *next_ft)
2174 {
2175 	struct mlx5_flow_destination dest = {};
2176 	struct mlx5_flow_act act = {};
2177 
2178 	act.flags  = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
2179 	act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2180 	dest.type  = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
2181 	dest.ft = next_ft;
2182 
2183 	return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
2184 }
2185 
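/* Note: passing a NULL spec to mlx5_add_flow_rules() creates a match-all
 * rule. Combined with the group that tc_ct_add_ct_table_miss_rule() below
 * carves out of the last two flow table entries, this serves as the
 * table-wide miss path into next_ft.
 */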
2186 static int
2187 tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
2188 			     struct mlx5_flow_table *to,
2189 			     struct mlx5_flow_group **miss_group,
2190 			     struct mlx5_flow_handle **miss_rule)
2191 {
2192 	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2193 	struct mlx5_flow_group *group;
2194 	struct mlx5_flow_handle *rule;
2195 	unsigned int max_fte = from->max_fte;
2196 	u32 *flow_group_in;
2197 	int err = 0;
2198 
2199 	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2200 	if (!flow_group_in)
2201 		return -ENOMEM;
2202 
2203 	/* create miss group */
2204 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
2205 		 max_fte - 2);
2206 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2207 		 max_fte - 1);
2208 	group = mlx5_create_flow_group(from, flow_group_in);
2209 	if (IS_ERR(group)) {
2210 		err = PTR_ERR(group);
2211 		goto err_miss_grp;
2212 	}
2213 
2214 	/* add miss rule to next fdb */
2215 	rule = tc_ct_add_miss_rule(from, to);
2216 	if (IS_ERR(rule)) {
2217 		err = PTR_ERR(rule);
2218 		goto err_miss_rule;
2219 	}
2220 
2221 	*miss_group = group;
2222 	*miss_rule = rule;
2223 	kvfree(flow_group_in);
2224 	return 0;
2225 
2226 err_miss_rule:
2227 	mlx5_destroy_flow_group(group);
2228 err_miss_grp:
2229 	kvfree(flow_group_in);
2230 	return err;
2231 }
2232 
2233 static void
2234 tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
2235 			     struct mlx5_flow_handle *miss_rule)
2236 {
2237 	mlx5_del_flow_rules(miss_rule);
2238 	mlx5_destroy_flow_group(miss_group);
2239 }
2240 
2241 #define INIT_ERR_PREFIX "tc ct offload init failed"
2242 
2243 struct mlx5_tc_ct_priv *
2244 mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
2245 		struct mod_hdr_tbl *mod_hdr,
2246 		enum mlx5_flow_namespace_type ns_type,
2247 		struct mlx5e_post_act *post_act)
2248 {
2249 	struct mlx5_tc_ct_priv *ct_priv;
2250 	struct mlx5_core_dev *dev;
2251 	u64 mapping_id;
2252 	int err;
2253 
2254 	dev = priv->mdev;
2255 	err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act);
2256 	if (err)
2257 		goto err_support;
2258 
2259 	ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
2260 	if (!ct_priv)
2261 		goto err_alloc;
2262 
2263 	mapping_id = mlx5_query_nic_system_image_guid(dev);
2264 
2265 	ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE,
2266 						      sizeof(u16), 0, true);
2267 	if (IS_ERR(ct_priv->zone_mapping)) {
2268 		err = PTR_ERR(ct_priv->zone_mapping);
2269 		goto err_mapping_zone;
2270 	}
2271 
2272 	ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS,
2273 							sizeof(u32) * 4, 0, true);
2274 	if (IS_ERR(ct_priv->labels_mapping)) {
2275 		err = PTR_ERR(ct_priv->labels_mapping);
2276 		goto err_mapping_labels;
2277 	}
2278 
2279 	spin_lock_init(&ct_priv->ht_lock);
2280 	ct_priv->priv = priv;
2281 	ct_priv->ns_type = ns_type;
2282 	ct_priv->chains = chains;
2283 	ct_priv->netdev = priv->netdev;
2284 	ct_priv->dev = priv->mdev;
2285 	ct_priv->mod_hdr_tbl = mod_hdr;
2286 	ct_priv->ct = mlx5_chains_create_global_table(chains);
2287 	if (IS_ERR(ct_priv->ct)) {
2288 		err = PTR_ERR(ct_priv->ct);
2289 		mlx5_core_warn(dev,
2290 			       "%s, failed to create ct table err: %d\n",
2291 			       INIT_ERR_PREFIX, err);
2292 		goto err_ct_tbl;
2293 	}
2294 
2295 	ct_priv->ct_nat = mlx5_chains_create_global_table(chains);
2296 	if (IS_ERR(ct_priv->ct_nat)) {
2297 		err = PTR_ERR(ct_priv->ct_nat);
2298 		mlx5_core_warn(dev,
2299 			       "%s, failed to create ct nat table err: %d\n",
2300 			       INIT_ERR_PREFIX, err);
2301 		goto err_ct_nat_tbl;
2302 	}
2303 
2304 	err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct,
2305 					   &ct_priv->ct_nat_miss_group,
2306 					   &ct_priv->ct_nat_miss_rule);
2307 	if (err)
2308 		goto err_ct_zone_ht;
2309 
2310 	ct_priv->post_act = post_act;
2311 	mutex_init(&ct_priv->control_lock);
2312 	if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
2313 		goto err_ct_zone_ht;
2314 	if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params))
2315 		goto err_ct_tuples_ht;
2316 	if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
2317 		goto err_ct_tuples_nat_ht;
2318 
2319 	ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0);
2320 	if (!ct_priv->wq) {
2321 		err = -ENOMEM;
2322 		goto err_wq;
2323 	}
2324 
2325 	err = mlx5_tc_ct_fs_init(ct_priv);
2326 	if (err)
2327 		goto err_init_fs;
2328 
2329 	mlx5_ct_tc_create_dbgfs(ct_priv);
2330 	return ct_priv;
2331 
2332 err_init_fs:
2333 	destroy_workqueue(ct_priv->wq);
2334 err_wq:
2335 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
2336 err_ct_tuples_nat_ht:
2337 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
2338 err_ct_tuples_ht:
2339 	rhashtable_destroy(&ct_priv->zone_ht);
2340 err_ct_zone_ht:
2341 	mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
2342 err_ct_nat_tbl:
2343 	mlx5_chains_destroy_global_table(chains, ct_priv->ct);
2344 err_ct_tbl:
2345 	mapping_destroy(ct_priv->labels_mapping);
2346 err_mapping_labels:
2347 	mapping_destroy(ct_priv->zone_mapping);
2348 err_mapping_zone:
2349 	kfree(ct_priv);
2350 err_alloc:
2351 err_support:
2352 
2353 	return NULL;
2354 }
2355 
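/* Typical lifetime, as a sketch of a caller (the real call sites are in
 * the eswitch/NIC tc init paths outside this file). Note that
 * mlx5_tc_ct_init() returns NULL on failure rather than an ERR_PTR, and
 * mlx5_tc_ct_clean() tolerates a NULL ct_priv:
 *
 *	ct_priv = mlx5_tc_ct_init(priv, chains, mod_hdr, ns_type, post_act);
 *	if (!ct_priv)
 *		netdev_dbg(priv->netdev, "tc ct offload disabled\n");
 *	...
 *	mlx5_tc_ct_clean(ct_priv);
 */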
2356 void
2357 mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
2358 {
2359 	struct mlx5_fs_chains *chains;
2360 
2361 	if (!ct_priv)
2362 		return;
2363 
2364 	destroy_workqueue(ct_priv->wq);
2365 	mlx5_ct_tc_remove_dbgfs(ct_priv);
2366 	chains = ct_priv->chains;
2367 
2368 	ct_priv->fs_ops->destroy(ct_priv->fs);
2369 	kfree(ct_priv->fs);
2370 
2371 	tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule);
2372 	mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
2373 	mlx5_chains_destroy_global_table(chains, ct_priv->ct);
2374 	mapping_destroy(ct_priv->zone_mapping);
2375 	mapping_destroy(ct_priv->labels_mapping);
2376 
2377 	rhashtable_destroy(&ct_priv->ct_tuples_ht);
2378 	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
2379 	rhashtable_destroy(&ct_priv->zone_ht);
2380 	mutex_destroy(&ct_priv->control_lock);
2381 	kfree(ct_priv);
2382 }
2383 
2384 bool
2385 mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
2386 			 struct sk_buff *skb, u8 zone_restore_id)
2387 {
2388 	struct mlx5_ct_tuple tuple = {};
2389 	struct mlx5_ct_entry *entry;
2390 	u16 zone;
2391 
2392 	if (!ct_priv || !zone_restore_id)
2393 		return true;
2394 
2395 	if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
2396 		goto out_inc_drop;
2397 
2398 	if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
2399 		goto out_inc_drop;
2400 
2401 	spin_lock(&ct_priv->ht_lock);
2402 
2403 	entry = mlx5_tc_ct_entry_get(ct_priv, &tuple);
2404 	if (!entry) {
2405 		spin_unlock(&ct_priv->ht_lock);
2406 		goto out_inc_drop;
2407 	}
2408 
2409 	if (IS_ERR(entry)) {
2410 		spin_unlock(&ct_priv->ht_lock);
2411 		goto out_inc_drop;
2412 	}
2413 	spin_unlock(&ct_priv->ht_lock);
2414 
2415 	tcf_ct_flow_table_restore_skb(skb, entry->restore_cookie);
2416 	__mlx5_tc_ct_entry_put(entry);
2417 
2418 	return true;
2419 
2420 out_inc_drop:
2421 	atomic_inc(&ct_priv->debugfs.stats.rx_dropped);
2422 	return false;
2423 }
2424
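/* A false return tells the RX path that the skb could not be restored to
 * its conntrack state and should be dropped (accounted in the rx_dropped
 * debugfs counter above); a sketch of the assumed caller contract:
 *
 *	if (!mlx5e_tc_ct_restore_flow(ct_priv, skb, zone_restore_id)) {
 *		dev_kfree_skb_any(skb);
 *		return;
 *	}
 */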