// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/fs_ttc.h"

#define MLX5_TTC_MAX_NUM_GROUPS		4
#define MLX5_TTC_GROUP_TCPUDP_SIZE	(MLX5_TT_IPV6_UDP + 1)

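/*
 * Describes how a TTC flow table is carved into flow groups: how many
 * groups there are, how many flow table entries each group holds, and
 * whether the TCP/UDP rules match on the parsed l4_type field instead
 * of the ip_protocol header field.
 */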
struct mlx5_fs_ttc_groups {
	bool use_l4_type;
	int num_groups;
	int group_size[MLX5_TTC_MAX_NUM_GROUPS];
};

static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
{
	int i, sz = 0;

	for (i = 0; i < groups->num_groups; i++)
		sz += groups->group_size[i];

	return sz;
}

/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
	int num_groups;
	struct mlx5_flow_table *t;
	struct mlx5_flow_group **g;
	struct mlx5_ttc_rule rules[MLX5_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
};

struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
	return ttc->t;
}

static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
			mlx5_del_flow_rules(ttc->rules[i].rule);
			ttc->rules[i].rule = NULL;
		}
	}

	for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
			mlx5_del_flow_rules(ttc->tunnel_rules[i]);
			ttc->tunnel_rules[i] = NULL;
		}
	}
}

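/*
 * Each traffic type is keyed by an (ethertype, IP protocol) pair;
 * a zero field means "don't care" for that layer.
 */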
struct mlx5_etype_proto {
	u16 etype;
	u8 proto;
};

static struct mlx5_etype_proto ttc_rules[] = {
	[MLX5_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};

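/*
 * Tunneled traffic types, matched in the outer TTC table and steered
 * to the tunnel destinations (typically the inner TTC table) when the
 * device supports decap of the given protocol.
 */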
static struct mlx5_etype_proto ttc_tunnel_rules[] = {
	[MLX5_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},
};

enum TTC_GROUP_TYPE {
	TTC_GROUPS_DEFAULT = 0,
	TTC_GROUPS_USE_L4_TYPE = 1,
};

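/*
 * Group sizing: BIT(3) covers the eight protocol-matching rule slots
 * (TCP/UDP and IPsec AH/ESP over IPv4/IPv6), BIT(1) the two L3-only
 * rules and BIT(0) the MLX5_TT_ANY catch-all. When l4_type matching is
 * available, the TCP/UDP rules move into a dedicated leading group of
 * MLX5_TTC_GROUP_TCPUDP_SIZE entries.
 */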
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
	[TTC_GROUPS_DEFAULT] = {
		.num_groups = 3,
		.group_size = {
			BIT(3) + MLX5_NUM_TUNNEL_TT,
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE] = {
		.use_l4_type = true,
		.num_groups = 4,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1),
			BIT(0),
		},
	},
};

static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
	[TTC_GROUPS_DEFAULT] = {
		.num_groups = 3,
		.group_size = {
			BIT(3),
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE] = {
		.use_l4_type = true,
		.num_groups = 4,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1),
			BIT(0),
		},
	},
};

u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}

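/* RX tunnel decap support is advertised per protocol in the ETH caps. */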
static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
					   u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}

static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
{
	int tt;

	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (mlx5_tunnel_proto_supported_rx(mdev,
						   ttc_tunnel_rules[tt].proto))
			return true;
	}
	return false;
}

bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version));
}

static u8 mlx5_etype_to_ipv(u16 ethertype)
{
	if (ethertype == ETH_P_IP)
		return 4;

	if (ethertype == ETH_P_IPV6)
		return 6;

	return 0;
}

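/*
 * TCP/UDP rules match on the parsed l4_type field when the device
 * supports it; all other protocols (and older devices) match on the
 * ip_protocol header field.
 */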
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
					u8 proto, bool use_l4_type)
{
	int l4_type;

	if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
		if (proto == IPPROTO_TCP)
			l4_type = MLX5_PACKET_L4_TYPE_TCP;
		else
			l4_type = MLX5_PACKET_L4_TYPE_UDP;

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
	} else {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
	}
}

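/*
 * Build a single outer-header classification rule. Matching the parsed
 * ip_version is preferred over the raw ethertype when the device can
 * match on outer_ip_version.
 */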
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
		       struct mlx5_flow_destination *dest, u16 etype, u8 proto,
		       bool use_l4_type)
{
	int match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
							 spec->match_criteria,
							 outer_headers),
					    MLX5_ADDR_OF(fte_match_param,
							 spec->match_value,
							 outer_headers),
					    proto, use_l4_type);
	}

	ipv = mlx5_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv);
	} else if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

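/*
 * Instantiate one rule per traffic type, then the tunnel rules if an
 * inner TTC table is in use; traffic types marked in ignore_dests or
 * ignore_tunnel_dests are skipped.
 */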
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
					 struct ttc_params *params,
					 struct mlx5_ttc_table *ttc,
					 bool use_l4_type)
{
	struct mlx5_flow_handle **trules;
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->t;
	rules = ttc->rules;
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto,
						    use_l4_type);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
		return 0;

	trules = ttc->tunnel_rules;
	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (!mlx5_tunnel_proto_supported_rx(dev,
						    ttc_tunnel_rules[tt].proto))
			continue;
		if (test_bit(tt, params->ignore_tunnel_dests))
			continue;
		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
						    &params->tunnel_dests[tt],
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto,
						    use_l4_type);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

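/*
 * Create the flow groups in decreasing match strictness: the optional
 * TCP/UDP (l4_type) group, the L4 (ip_protocol) group, the L3-only
 * group and finally the match-all group. Order and sizes must line up
 * with the group_size[] entries in ttc_groups[] above.
 */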
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
					bool use_ipv,
					const struct mlx5_fs_ttc_groups *groups)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);

	/* TCP UDP group */
	if (groups->use_l4_type) {
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += groups->group_size[ttc->num_groups];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
		if (IS_ERR(ttc->g[ttc->num_groups]))
			goto err;
		ttc->num_groups++;

		MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
	}

	/* L4 Group */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

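/*
 * Inner rules mirror the outer ones but always match on the parsed
 * inner ip_version, since there is no inner ethertype field to match.
 */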
static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
			     struct mlx5_flow_table *ft,
			     struct mlx5_flow_destination *dest,
			     u16 etype, u8 proto, bool use_l4_type)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 ipv;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	ipv = mlx5_etype_to_ipv(etype);
	if (etype && ipv) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
		mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
							 spec->match_criteria,
							 inner_headers),
					    MLX5_ADDR_OF(fte_match_param,
							 spec->match_value,
							 inner_headers),
					    proto, use_l4_type);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
					       struct ttc_params *params,
					       struct mlx5_ttc_table *ttc,
					       bool use_l4_type)
{
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->t;
	rules = ttc->rules;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
							  &params->dests[tt],
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto,
							  use_l4_type);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}

static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
					      const struct mlx5_fs_ttc_groups *groups)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);

	/* TCP UDP group */
	if (groups->use_l4_type) {
		MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += groups->group_size[ttc->num_groups];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
		if (IS_ERR(ttc->g[ttc->num_groups]))
			goto err;
		ttc->num_groups++;

		MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
	}

	/* L4 Group */
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}

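/*
 * Create the inner TTC table, which classifies the inner headers of
 * tunneled traffic. Whether the TCP/UDP rules can use l4_type matching
 * depends on the namespace-specific inner_l4_type capability.
 */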
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
						   struct ttc_params *params)
{
	const struct mlx5_fs_ttc_groups *groups;
	struct mlx5_flow_namespace *ns;
	struct mlx5_ttc_table *ttc;
	bool use_l4_type;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	switch (params->ns_type) {
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			      MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			      MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
		break;
	default:
		kvfree(ttc);
		return ERR_PTR(-EINVAL);
	}

	ns = mlx5_get_flow_namespace(dev, params->ns_type);
	groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
			       &inner_ttc_groups[TTC_GROUPS_DEFAULT];

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_inner_ttc_table_groups(ttc, groups);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

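/* Tear down the rules, then the groups, then the table itself. */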
void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
	int i;

	mlx5_cleanup_ttc_rules(ttc);
	for (i = ttc->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ttc->g[i]))
			mlx5_destroy_flow_group(ttc->g[i]);
		ttc->g[i] = NULL;
	}

	kfree(ttc->g);
	mlx5_destroy_flow_table(ttc->t);
	kvfree(ttc);
}

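/*
 * Create the outer TTC table: pick the group layout from the
 * outer_l4_type capability, size the table accordingly, then create
 * the groups and the classification rules.
 */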
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
					     struct ttc_params *params)
{
	bool match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	const struct mlx5_fs_ttc_groups *groups;
	struct mlx5_flow_namespace *ns;
	struct mlx5_ttc_table *ttc;
	bool use_l4_type;
	int err;

	ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	switch (params->ns_type) {
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			      MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			      MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
		break;
	default:
		kvfree(ttc);
		return ERR_PTR(-EINVAL);
	}

	ns = mlx5_get_flow_namespace(dev, params->ns_type);
	groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
			       &ttc_groups[TTC_GROUPS_DEFAULT];

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}

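/* Redirect a traffic type's rule to a caller-supplied destination. */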
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
		      struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
					    NULL);
}

struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
			  enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}

int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
			      enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);

	return mlx5_ttc_fwd_dest(ttc, type, &dest);
}