// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/fs_ttc.h"

#define MLX5_TTC_MAX_NUM_GROUPS		4
#define MLX5_TTC_GROUP_TCPUDP_SIZE	(MLX5_TT_IPV6_UDP + 1)

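/*
 * Describes how a TTC flow table is split into flow groups: the number
 * of groups, the number of flow table entries in each group, and whether
 * TCP/UDP rules match on the parsed l4_type field instead of ip_protocol.
 */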
struct mlx5_fs_ttc_groups {
	bool use_l4_type;
	int num_groups;
	int group_size[MLX5_TTC_MAX_NUM_GROUPS];
};

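/* Table size is the sum of the sizes of all flow groups. */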
static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
{
	int i, sz = 0;

	for (i = 0; i < groups->num_groups; i++)
		sz += groups->group_size[i];

	return sz;
}

/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
	struct mlx5_ttc_rule rules[MLX5_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
};

struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
	return ttc->t;
}

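/* Delete every rule (and tunnel rule) that was successfully created. */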
static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
	[MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
	[MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
	[MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
	[MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
	[MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
	[MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
	[MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
	[MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
	[MLX5_TT_IPV4] = "TT_IPV4",
	[MLX5_TT_IPV6] = "TT_IPV6",
	[MLX5_TT_ANY] = "TT_ANY"
};

const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
{
	return mlx5_traffic_types_names[tt];
}

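/*
 * Matching key for each traffic type: the ethertype selects IPv4/IPv6
 * and the IP protocol selects the L4 type. A zero field means "do not
 * match on it", so MLX5_TT_ANY matches every packet.
 */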
struct mlx5_etype_proto {
	u16 etype;
	u8 proto;
};

static struct mlx5_etype_proto ttc_rules[] = {
	[MLX5_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

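/* Tunnel traffic types, keyed by ethertype and outer IP protocol. */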
static struct mlx5_etype_proto ttc_tunnel_rules[] = {
	[MLX5_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},
};

enum TTC_GROUP_TYPE {
	TTC_GROUPS_DEFAULT = 0,
	TTC_GROUPS_USE_L4_TYPE = 1,
};

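/*
 * Flow group layouts for the outer TTC table. In the default layout the
 * first group holds all ip_protocol-matched rules (L4 and tunnel types),
 * followed by a two-entry L3 group and a one-entry catch-all group. When
 * l4_type matching is supported, the TCP/UDP rules move into a dedicated
 * leading group of their own.
 */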
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
	[TTC_GROUPS_DEFAULT] = {
		.num_groups = 3,
		.group_size = {
			BIT(3) + MLX5_NUM_TUNNEL_TT,
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE] = {
		.use_l4_type = true,
		.num_groups = 4,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1),
			BIT(0),
		},
	},
};

static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
	[TTC_GROUPS_DEFAULT] = {
		.num_groups = 3,
		.group_size = {
			BIT(3),
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE] = {
		.use_l4_type = true,
		.num_groups = 4,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1),
			BIT(0),
		},
	},
};

u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}

static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
					   u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}

static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5_tunnel_proto_supported_rx(mdev,
						   ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}

bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version));
}

static u8 mlx5_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

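/*
 * Match TCP/UDP either on the device-parsed l4_type field, when
 * supported, or on the ip_protocol header field otherwise.
 */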
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
					u8 proto, bool use_l4_type)
{
	int l4_type;

	if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
		if (proto == IPPROTO_TCP)
			l4_type = MLX5_PACKET_L4_TYPE_TCP;
		else
			l4_type = MLX5_PACKET_L4_TYPE_UDP;

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
	} else {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
	}
}

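/*
 * Build one outer-header classification rule steering to @dest: match on
 * the L4 protocol if @proto is set, and on IP version when the device
 * supports it (falling back to ethertype otherwise).
 */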
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
		       struct mlx5_flow_destination *dest, u16 etype, u8 proto,
		       bool use_l4_type)
{
	int match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
							 spec->match_criteria,
							 outer_headers),
					    MLX5_ADDR_OF(fte_match_param,
							 spec->match_value,
							 outer_headers),
					    proto, use_l4_type);
	}

	ipv = mlx5_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
					 struct ttc_params *params,
					 struct mlx5_ttc_table *ttc,
					 bool use_l4_type)
{
	struct mlx5_flow_handle **trules;
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->t;
	rules = ttc->rules;
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto,
						    use_l4_type);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
		return 0;

	trules = ttc->tunnel_rules;
	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (!mlx5_tunnel_proto_supported_rx(dev,
						    ttc_tunnel_rules[tt].proto))
			continue;
		if (test_bit(tt, params->ignore_tunnel_dests))
			continue;
		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
						    &params->tunnel_dests[tt],
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto,
						    use_l4_type);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

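/*
 * Carve the table into groups, most-specific match first: an optional
 * TCP/UDP group matching on l4_type, an L4 group matching on
 * ip_protocol, an L3 group matching on IP version (or ethertype), and
 * a final match-all group.
 */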
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
					bool use_ipv,
					const struct mlx5_fs_ttc_groups *groups)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);

	/* TCP UDP group */
	if (groups->use_l4_type) {
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += groups->group_size[ttc->num_groups];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
		if (IS_ERR(ttc->g[ttc->num_groups]))
			goto err;
		ttc->num_groups++;

		MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
	}

	/* L4 Group */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

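/*
 * Like mlx5_generate_ttc_rule() but matching on the inner headers.
 * Inner ip_version support is a prerequisite for inner TTC tables, so
 * there is no ethertype fallback here.
 */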
static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *ft,
			     struct mlx5_flow_destination *dest,
			     u16 etype, u8 proto, bool use_l4_type)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
							 spec->match_criteria,
							 inner_headers),
					    MLX5_ADDR_OF(fte_match_param,
							 spec->match_value,
							 inner_headers),
					    proto, use_l4_type);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
					       struct ttc_params *params,
					       struct mlx5_ttc_table *ttc,
					       bool use_l4_type)
{
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->t;
	rules = ttc->rules;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
							  &params->dests[tt],
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto,
							  use_l4_type);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
					      const struct mlx5_fs_ttc_groups *groups)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);

	/* TCP UDP group */
	if (groups->use_l4_type) {
		MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += groups->group_size[ttc->num_groups];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
		if (IS_ERR(ttc->g[ttc->num_groups]))
			goto err;
		ttc->num_groups++;

		MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
	}

	/* L4 Group */
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

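/*
 * Create a TTC table classifying tunneled packets by their inner
 * headers. ft_attr.max_fte must be left unset by the caller; it is
 * derived from the chosen group layout.
 */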
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
						   struct ttc_params *params)
{
	const struct mlx5_fs_ttc_groups *groups;
	struct mlx5_flow_namespace *ns;
	struct mlx5_ttc_table *ttc;
	bool use_l4_type;
	int err;

	switch (params->ns_type) {
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	ns = mlx5_get_flow_namespace(dev, params->ns_type);
	if (!ns) {
		kvfree(ttc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
			       &inner_ttc_groups[TTC_GROUPS_DEFAULT];

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_inner_ttc_table_groups(ttc, groups);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
	int i;

	mlx5_cleanup_ttc_rules(ttc);
	for (i = ttc->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ttc->g[i]))
			mlx5_destroy_flow_group(ttc->g[i]);
		ttc->g[i] = NULL;
	}

	kfree(ttc->g);
	mlx5_destroy_flow_table(ttc->t);
	kvfree(ttc);
}

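/*
 * Create the outer-header TTC table. Minimal caller sketch (error
 * handling elided; the caller owns @params and fills a destination per
 * traffic type):
 *
 *	struct ttc_params params = {};
 *
 *	params.ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
 *	... fill params.dests[tt] for each traffic type ...
 *	ttc = mlx5_create_ttc_table(dev, &params);
 *	...
 *	mlx5_destroy_ttc_table(ttc);
 */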
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
					     struct ttc_params *params)
{
	bool match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	const struct mlx5_fs_ttc_groups *groups;
	struct mlx5_flow_namespace *ns;
	struct mlx5_ttc_table *ttc;
	bool use_l4_type;
	int err;

	switch (params->ns_type) {
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	ns = mlx5_get_flow_namespace(dev, params->ns_type);
	if (!ns) {
		kvfree(ttc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
			       &ttc_groups[TTC_GROUPS_DEFAULT];

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

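/*
 * Re-steer the rule of a given traffic type to @new_dest. The original
 * destination is kept in rules[type].default_dest so it can be restored
 * with mlx5_ttc_fwd_default_dest().
 */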
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
		      struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
					    NULL);
}

struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
			  enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}

int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
			      enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);

	return mlx5_ttc_fwd_dest(ttc, type, &dest);
}