1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES.
3
4 #include <linux/ip.h>
5 #include <linux/ipv6.h>
6 #include <linux/tcp.h>
7 #include <linux/mlx5/fs.h>
8 #include <linux/mlx5/driver.h>
9 #include "mlx5_core.h"
10 #include "lib/fs_ttc.h"
11
12 #define MLX5_TTC_MAX_NUM_GROUPS 7
13 #define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
14
/* Layout of a TTC flow table: how many flow groups it has and how many
 * rule entries (FTEs) each group reserves.  The group order is fixed and
 * must match the creation order in the mlx5_create_*_ttc_table_groups()
 * functions below.
 */
struct mlx5_fs_ttc_groups {
	bool use_l4_type;	/* match TCP/UDP via the l4_type field rather than ip_protocol */
	int num_groups;		/* number of valid entries in group_size[] */
	int group_size[MLX5_TTC_MAX_NUM_GROUPS];
};
20
mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups * groups)21 static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
22 {
23 int i, sz = 0;
24
25 for (i = 0; i < groups->num_groups; i++)
26 sz += groups->group_size[i];
27
28 return sz;
29 }
30
31 /* L3/L4 traffic type classifier */
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
	int num_groups;		/* flow groups created so far in g[] */
	const struct mlx5_fs_ttc_groups *groups; /* layout this table was built from */
	struct mlx5_core_dev *mdev;
	struct mlx5_flow_table *t;	/* the underlying flow table */
	struct mlx5_flow_group **g;	/* array of groups->num_groups flow groups */
	struct mlx5_ttc_rule rules[MLX5_NUM_TT];
	struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT];
	u32 refcnt;	/* users of the on-demand IPsec decrypted-ESP rules */
	struct mutex mutex; /* Protect adding rules for ipsec crypto offload */
};
43
/* Expose the underlying flow table so callers can chain steering to it. */
struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc)
{
	return ttc->t;
}
48
mlx5_cleanup_ttc_rules(struct mlx5_ttc_table * ttc)49 static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc)
50 {
51 int i;
52
53 for (i = 0; i < MLX5_NUM_TT; i++) {
54 if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) {
55 mlx5_del_flow_rules(ttc->rules[i].rule);
56 ttc->rules[i].rule = NULL;
57 }
58 }
59
60 for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) {
61 if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) {
62 mlx5_del_flow_rules(ttc->tunnel_rules[i]);
63 ttc->tunnel_rules[i] = NULL;
64 }
65 }
66 }
67
/* Human-readable names for the outer traffic types, indexed by
 * enum mlx5_traffic_types.
 */
static const char *mlx5_traffic_types_names[MLX5_NUM_TT] = {
	[MLX5_TT_IPV4_TCP] = "TT_IPV4_TCP",
	[MLX5_TT_IPV6_TCP] = "TT_IPV6_TCP",
	[MLX5_TT_IPV4_UDP] = "TT_IPV4_UDP",
	[MLX5_TT_IPV6_UDP] = "TT_IPV6_UDP",
	[MLX5_TT_IPV4_IPSEC_AH] = "TT_IPV4_IPSEC_AH",
	[MLX5_TT_IPV6_IPSEC_AH] = "TT_IPV6_IPSEC_AH",
	[MLX5_TT_IPV4_IPSEC_ESP] = "TT_IPV4_IPSEC_ESP",
	[MLX5_TT_IPV6_IPSEC_ESP] = "TT_IPV6_IPSEC_ESP",
	[MLX5_TT_IPV4] = "TT_IPV4",
	[MLX5_TT_IPV6] = "TT_IPV6",
	[MLX5_TT_ANY] = "TT_ANY"
};
81
/* Name of traffic type @tt; no bounds check, caller passes a valid type. */
const char *mlx5_ttc_get_name(enum mlx5_traffic_types tt)
{
	return mlx5_traffic_types_names[tt];
}
86
/* EtherType / IP protocol pair describing the match key of one traffic type. */
struct mlx5_etype_proto {
	u16 etype;
	u8 proto;
};
91
/* Match keys for each outer traffic type.  proto == 0 means "L3 match
 * only"; etype == 0 (MLX5_TT_ANY) matches everything left over.
 */
static struct mlx5_etype_proto ttc_rules[] = {
	[MLX5_TT_IPV4_TCP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV6_TCP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_TCP,
	},
	[MLX5_TT_IPV4_UDP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV6_UDP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_UDP,
	},
	[MLX5_TT_IPV4_IPSEC_AH] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV6_IPSEC_AH] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_AH,
	},
	[MLX5_TT_IPV4_IPSEC_ESP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV6_IPSEC_ESP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_ESP,
	},
	[MLX5_TT_IPV4] = {
		.etype = ETH_P_IP,
		.proto = 0,
	},
	[MLX5_TT_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = 0,
	},
	[MLX5_TT_ANY] = {
		.etype = 0,
		.proto = 0,
	},
};
138
/* Match keys for the tunnel traffic types (outer header of the tunnel). */
static struct mlx5_etype_proto ttc_tunnel_rules[] = {
	[MLX5_TT_IPV4_GRE] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV6_GRE] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_GRE,
	},
	[MLX5_TT_IPV4_IPIP] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV6_IPIP] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPIP,
	},
	[MLX5_TT_IPV4_IPV6] = {
		.etype = ETH_P_IP,
		.proto = IPPROTO_IPV6,
	},
	[MLX5_TT_IPV6_IPV6] = {
		.etype = ETH_P_IPV6,
		.proto = IPPROTO_IPV6,
	},

};
166
/* Selector into the ttc_groups[] / inner_ttc_groups[] layout tables below. */
enum TTC_GROUP_TYPE {
	TTC_GROUPS_DEFAULT = 0,
	TTC_GROUPS_USE_L4_TYPE = 1,
	TTC_GROUPS_DEFAULT_ESP = 2,
	TTC_GROUPS_USE_L4_TYPE_ESP = 3,
};
173
/* Flow-group layouts for the outer TTC table.  Group order here must
 * match the creation order in mlx5_create_ttc_table_groups() (and the
 * IPsec groups in mlx5_create_ttc_table_ipsec_groups()).
 */
static const struct mlx5_fs_ttc_groups ttc_groups[] = {
	[TTC_GROUPS_DEFAULT] = {
		.num_groups = 3,
		.group_size = {
			BIT(3) + MLX5_NUM_TUNNEL_TT,
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE] = {
		.use_l4_type = true,
		.num_groups = 4,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_DEFAULT_ESP] = {
		.num_groups = 6,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE + BIT(1) +
			MLX5_NUM_TUNNEL_TT,
			BIT(2), /* decrypted outer L4 */
			BIT(2), /* decrypted inner L4 */
			BIT(1), /* ESP */
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE_ESP] = {
		.use_l4_type = true,
		.num_groups = 7,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1) + MLX5_NUM_TUNNEL_TT,
			BIT(2), /* decrypted outer L4 */
			BIT(2), /* decrypted inner L4 */
			BIT(1), /* ESP */
			BIT(1),
			BIT(0),
		},
	},
};
219
/* Flow-group layouts for the inner (tunneled traffic) TTC table. */
static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
	[TTC_GROUPS_DEFAULT] = {
		.num_groups = 3,
		.group_size = {
			BIT(3),
			BIT(1),
			BIT(0),
		},
	},
	[TTC_GROUPS_USE_L4_TYPE] = {
		.use_l4_type = true,
		.num_groups = 4,
		.group_size = {
			MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
			BIT(1),
			BIT(0),
		},
	},
};
240
241 static const struct mlx5_fs_ttc_groups *
mlx5_ttc_get_fs_groups(bool use_l4_type,bool ipsec_rss)242 mlx5_ttc_get_fs_groups(bool use_l4_type, bool ipsec_rss)
243 {
244 if (!ipsec_rss)
245 return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
246 &ttc_groups[TTC_GROUPS_DEFAULT];
247
248 return use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP] :
249 &ttc_groups[TTC_GROUPS_DEFAULT_ESP];
250 }
251
/* True if @ttc was built with the extra ESP / decrypted-ESP flow groups
 * (compares against the shared layout tables by address).
 */
bool mlx5_ttc_has_esp_flow_group(struct mlx5_ttc_table *ttc)
{
	return ttc->groups == &ttc_groups[TTC_GROUPS_DEFAULT_ESP] ||
	       ttc->groups == &ttc_groups[TTC_GROUPS_USE_L4_TYPE_ESP];
}
257
/* IP protocol number used to match tunnel traffic type @tt. */
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
	return ttc_tunnel_rules[tt].proto;
}
262
/* Does the device support RX stateless offload for this tunnel protocol? */
static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev,
					   u8 proto_type)
{
	switch (proto_type) {
	case IPPROTO_GRE:
		return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
	case IPPROTO_IPIP:
	case IPPROTO_IPV6:
		/* either the symmetric or the RX-only ip-over-ip cap suffices */
		return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
			MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx));
	default:
		return false;
	}
}
277
mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev * mdev)278 static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev)
279 {
280 int tt;
281
282 for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
283 if (mlx5_tunnel_proto_supported_rx(mdev,
284 ttc_tunnel_rules[tt].proto))
285 return true;
286 }
287 return false;
288 }
289
/* Inner-header steering needs at least one supported RX tunnel protocol
 * plus the ability to match on the inner ip_version field.
 */
bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev)
{
	return (mlx5_tunnel_any_rx_proto_supported(mdev) &&
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version));
}
296
mlx5_etype_to_ipv(u16 ethertype)297 static u8 mlx5_etype_to_ipv(u16 ethertype)
298 {
299 if (ethertype == ETH_P_IP)
300 return 4;
301
302 if (ethertype == ETH_P_IPV6)
303 return 6;
304
305 return 0;
306 }
307
/* Program the outer L3 match for @etype into @spec: match on ip_version
 * when the device supports that field (and the EtherType is IP), else
 * fall back to matching the raw ethertype.  Always enables outer-header
 * matching in the spec.
 */
static void mlx5_fs_ttc_set_match_ipv_outer(struct mlx5_core_dev *mdev,
					    struct mlx5_flow_spec *spec,
					    u16 etype)
{
	int match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version);
	u8 ipv;

	ipv = mlx5_etype_to_ipv(etype);
	if (match_ipv_outer && ipv) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ip_version);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ip_version, ipv);
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value,
			 outer_headers.ethertype, etype);
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
}
332
/* Program the L4 protocol match into the given header criteria/value
 * layouts.  When @use_l4_type is set and @proto is TCP or UDP, match the
 * parsed l4_type field; otherwise match the raw ip_protocol byte.
 */
static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
					u8 proto, bool use_l4_type)
{
	int l4_type;

	if (!use_l4_type || (proto != IPPROTO_TCP && proto != IPPROTO_UDP)) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
		return;
	}

	l4_type = proto == IPPROTO_TCP ? MLX5_PACKET_L4_TYPE_TCP :
					 MLX5_PACKET_L4_TYPE_UDP;
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
}
351
352 static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev * dev,struct mlx5_flow_table * ft,struct mlx5_flow_destination * dest,u16 etype,u8 proto,bool use_l4_type,bool ipsec_rss)353 mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
354 struct mlx5_flow_destination *dest, u16 etype, u8 proto,
355 bool use_l4_type, bool ipsec_rss)
356 {
357 MLX5_DECLARE_FLOW_ACT(flow_act);
358 struct mlx5_flow_handle *rule;
359 struct mlx5_flow_spec *spec;
360 int err = 0;
361
362 spec = kvzalloc_obj(*spec);
363 if (!spec)
364 return ERR_PTR(-ENOMEM);
365
366 if (proto) {
367 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
368 mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
369 spec->match_criteria,
370 outer_headers),
371 MLX5_ADDR_OF(fte_match_param,
372 spec->match_value,
373 outer_headers),
374 proto, use_l4_type);
375 }
376
377 if (etype)
378 mlx5_fs_ttc_set_match_ipv_outer(dev, spec, etype);
379
380 if (ipsec_rss && proto == IPPROTO_ESP) {
381 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
382 misc_parameters_2.ipsec_next_header);
383 MLX5_SET(fte_match_param, spec->match_value,
384 misc_parameters_2.ipsec_next_header, 0);
385 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
386 }
387
388 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
389 if (IS_ERR(rule)) {
390 err = PTR_ERR(rule);
391 mlx5_core_err(dev, "%s: add rule failed\n", __func__);
392 }
393
394 kvfree(spec);
395 return err ? ERR_PTR(err) : rule;
396 }
397
/* Install the full set of outer TTC rules described by ttc_rules[] and,
 * when inner steering is requested and supported, the tunnel rules from
 * ttc_tunnel_rules[].  Decrypted-ESP traffic types are skipped here (they
 * are installed on demand via mlx5_ttc_create_ipsec_rules()), as are
 * destinations flagged in the params ignore bitmaps.  On any failure all
 * rules installed so far are removed.
 */
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
					 struct ttc_params *params,
					 struct mlx5_ttc_table *ttc,
					 bool use_l4_type)
{
	struct mlx5_flow_handle **trules;
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ft = ttc->t;
	rules = ttc->rules;
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (mlx5_ttc_is_decrypted_esp_tt(tt))
			continue;

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto,
						    use_l4_type,
						    params->ipsec_rss);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev))
		return 0;

	trules    = ttc->tunnel_rules;
	for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
		if (!mlx5_tunnel_proto_supported_rx(dev,
						    ttc_tunnel_rules[tt].proto))
			continue;
		if (test_bit(tt, params->ignore_tunnel_dests))
			continue;
		trules[tt] = mlx5_generate_ttc_rule(dev, ft,
						    &params->tunnel_dests[tt],
						    ttc_tunnel_rules[tt].etype,
						    ttc_tunnel_rules[tt].proto,
						    use_l4_type, false);
		if (IS_ERR(trules[tt])) {
			err = PTR_ERR(trules[tt]);
			trules[tt] = NULL;
			goto del_rules;
		}
	}

	return 0;

del_rules:
	mlx5_cleanup_ttc_rules(ttc);
	return err;
}
460
/* Create the three extra flow groups used for IPsec RSS, in order:
 * decrypted-ESP outer L4, decrypted-ESP inner L4, and undecrypted ESP.
 * @in is the caller's reused create_flow_group_in buffer (its match
 * criteria are mutated and partially restored here); *next_ix is the
 * running FTE index and is advanced past the new groups on success.
 * On failure the error of the failed group creation is returned and the
 * failing slot in ttc->g holds the ERR_PTR (caller clears it).
 */
static int mlx5_create_ttc_table_ipsec_groups(struct mlx5_ttc_table *ttc,
					      bool use_ipv,
					      u32 *in, int *next_ix)
{
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	const struct mlx5_fs_ttc_groups *groups = ttc->groups;
	int ix = *next_ix;

	/* clear the caller's L4 criterion before setting up l4_type_ext */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);

	/* decrypted ESP outer group */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type_ext);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	MLX5_SET(fte_match_param, mc, outer_headers.l4_type_ext, 0);

	/* decrypted ESP inner group */
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
	if (use_ipv)
		MLX5_SET(fte_match_param, mc, outer_headers.ip_version, 0);
	else
		MLX5_SET(fte_match_param, mc, outer_headers.ethertype, 0);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type_ext);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	MLX5_SET(fte_match_param, mc, inner_headers.ip_version, 0);
	MLX5_SET(fte_match_param, mc, inner_headers.l4_type_ext, 0);

	/* undecrypted ESP group */
	MLX5_SET_CFG(in, match_criteria_enable,
		     MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_2);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc,
			 misc_parameters_2.ipsec_next_header);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	*next_ix = ix;

	return 0;

err:
	return PTR_ERR(ttc->g[ttc->num_groups]);
}
528
/* Create the flow groups of the outer TTC table, in the fixed order the
 * group_size[] layouts assume: optional TCP/UDP (l4_type) group, L4
 * (ip_protocol) group, optional IPsec groups, L3 group, and a catch-all
 * group.  Populates ttc->g / ttc->num_groups.  On failure, returns the
 * error of the failing group creation; groups already created are left
 * in ttc->g for the caller to destroy via mlx5_destroy_ttc_table().
 */
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
					bool use_ipv)
{
	const struct mlx5_fs_ttc_groups *groups = ttc->groups;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kzalloc_objs(*ttc->g, groups->num_groups);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	/* L3 criterion: ip_version when the device supports it, else ethertype */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	if (use_ipv)
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
	else
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);

	/* TCP UDP group */
	if (groups->use_l4_type) {
		MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += groups->group_size[ttc->num_groups];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
		if (IS_ERR(ttc->g[ttc->num_groups]))
			goto err;
		ttc->num_groups++;

		MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
	}

	/* L4 Group */
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	if (mlx5_ttc_has_esp_flow_group(ttc)) {
		err = mlx5_create_ttc_table_ipsec_groups(ttc, use_ipv, in, &ix);
		if (err)
			goto err;

		/* undo the misc criterion left set by the IPsec groups */
		MLX5_SET(fte_match_param, mc,
			 misc_parameters_2.ipsec_next_header, 0);
	}

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	/* failing slot holds an ERR_PTR: extract the errno, then clear it */
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}
620
621 static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev * dev,struct mlx5_flow_table * ft,struct mlx5_flow_destination * dest,u16 etype,u8 proto,bool use_l4_type)622 mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
623 struct mlx5_flow_table *ft,
624 struct mlx5_flow_destination *dest,
625 u16 etype, u8 proto, bool use_l4_type)
626 {
627 MLX5_DECLARE_FLOW_ACT(flow_act);
628 struct mlx5_flow_handle *rule;
629 struct mlx5_flow_spec *spec;
630 int err = 0;
631 u8 ipv;
632
633 spec = kvzalloc_obj(*spec);
634 if (!spec)
635 return ERR_PTR(-ENOMEM);
636
637 ipv = mlx5_etype_to_ipv(etype);
638 if (etype && ipv) {
639 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
640 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version);
641 MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv);
642 }
643
644 if (proto) {
645 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
646 mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
647 spec->match_criteria,
648 inner_headers),
649 MLX5_ADDR_OF(fte_match_param,
650 spec->match_value,
651 inner_headers),
652 proto, use_l4_type);
653 }
654
655 rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
656 if (IS_ERR(rule)) {
657 err = PTR_ERR(rule);
658 mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__);
659 }
660
661 kvfree(spec);
662 return err ? ERR_PTR(err) : rule;
663 }
664
/* Install the inner TTC rules for all traffic types in ttc_rules[],
 * skipping decrypted-ESP types and destinations flagged in
 * params->ignore_dests.  On any failure all rules added so far are
 * removed.
 */
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
					       struct ttc_params *params,
					       struct mlx5_ttc_table *ttc,
					       bool use_l4_type)
{
	struct mlx5_ttc_rule *rules;
	struct mlx5_flow_table *ft;
	int err;
	int tt;

	ft = ttc->t;
	rules = ttc->rules;

	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		struct mlx5_ttc_rule *rule = &rules[tt];

		if (mlx5_ttc_is_decrypted_esp_tt(tt))
			continue;

		if (test_bit(tt, params->ignore_dests))
			continue;
		rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
							  &params->dests[tt],
							  ttc_rules[tt].etype,
							  ttc_rules[tt].proto,
							  use_l4_type);
		if (IS_ERR(rule->rule)) {
			err = PTR_ERR(rule->rule);
			rule->rule = NULL;
			goto del_rules;
		}
		rule->default_dest = params->dests[tt];
	}

	return 0;

del_rules:

	mlx5_cleanup_ttc_rules(ttc);
	return err;
}
706
/* Create the flow groups of the inner TTC table, in the order the
 * inner_ttc_groups[] layouts assume: optional TCP/UDP (l4_type) group,
 * L4 (ip_protocol) group, L3 (ip_version) group, and a catch-all group.
 * Populates ttc->g / ttc->num_groups; on failure returns the error from
 * the failing group creation and clears its slot.
 */
static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
					      const struct mlx5_fs_ttc_groups *groups)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ttc->g = kzalloc_objs(*ttc->g, groups->num_groups);
	if (!ttc->g)
		return -ENOMEM;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		kfree(ttc->g);
		ttc->g = NULL;
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);

	/* TCP UDP group */
	if (groups->use_l4_type) {
		MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
		MLX5_SET_CFG(in, start_flow_index, ix);
		ix += groups->group_size[ttc->num_groups];
		MLX5_SET_CFG(in, end_flow_index, ix - 1);
		ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
		if (IS_ERR(ttc->g[ttc->num_groups]))
			goto err;
		ttc->num_groups++;

		MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
	}

	/* L4 Group */
	MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += groups->group_size[ttc->num_groups];
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
	if (IS_ERR(ttc->g[ttc->num_groups]))
		goto err;
	ttc->num_groups++;

	kvfree(in);
	return 0;

err:
	/* failing slot holds an ERR_PTR: extract the errno, then clear it */
	err = PTR_ERR(ttc->g[ttc->num_groups]);
	ttc->g[ttc->num_groups] = NULL;
	kvfree(in);

	return err;
}
784
/* Create an inner (tunneled traffic) TTC table in the namespace given by
 * params->ns_type, pick the group layout by the device's inner l4_type
 * capability, create its groups and install its rules.  Returns the new
 * table or an ERR_PTR().
 *
 * NOTE(review): ttc->mutex is not mutex_init()ed here although
 * mlx5_destroy_ttc_table() calls mutex_destroy() on it — the struct is
 * zero-allocated, so this appears benign outside mutex debugging; confirm
 * against the outer-table path which does call mutex_init().
 */
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
						   struct ttc_params *params)
{
	const struct mlx5_fs_ttc_groups *groups;
	struct mlx5_flow_namespace *ns;
	struct mlx5_ttc_table *ttc;
	bool use_l4_type;
	int err;

	switch (params->ns_type) {
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ttc = kvzalloc_obj(*ttc);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	ns = mlx5_get_flow_namespace(dev, params->ns_type);
	if (!ns) {
		kvfree(ttc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
			       &inner_ttc_groups[TTC_GROUPS_DEFAULT];

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_inner_ttc_table_groups(ttc, groups);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
	if (err)
		goto destroy_ft;

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}
843
/* Tear down a TTC table: rules first, then flow groups in reverse
 * creation order, then the flow table itself.  Safe on a partially
 * constructed table (slots may be NULL or ERR_PTR).
 */
void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc)
{
	int i;

	mlx5_cleanup_ttc_rules(ttc);
	for (i = ttc->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ttc->g[i]))
			mlx5_destroy_flow_group(ttc->g[i]);
		ttc->g[i] = NULL;
	}

	kfree(ttc->g);
	mlx5_destroy_flow_table(ttc->t);
	mutex_destroy(&ttc->mutex);
	kvfree(ttc);
}
860
/* Create the outer TTC table in the namespace given by params->ns_type:
 * pick the group layout from the device's outer l4_type capability and
 * params->ipsec_rss, create the table and its groups, and install the
 * classification rules.  Returns the new table or an ERR_PTR().
 */
struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
					     struct ttc_params *params)
{
	bool match_ipv_outer =
		MLX5_CAP_FLOWTABLE_NIC_RX(dev,
					  ft_field_support.outer_ip_version);
	struct mlx5_flow_namespace *ns;
	struct mlx5_ttc_table *ttc;
	bool use_l4_type;
	int err;

	switch (params->ns_type) {
	case MLX5_FLOW_NAMESPACE_PORT_SEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
		break;
	case MLX5_FLOW_NAMESPACE_KERNEL:
		use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
			MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	ttc = kvzalloc_obj(*ttc);
	if (!ttc)
		return ERR_PTR(-ENOMEM);

	ns = mlx5_get_flow_namespace(dev, params->ns_type);
	if (!ns) {
		kvfree(ttc);
		return ERR_PTR(-EOPNOTSUPP);
	}

	ttc->groups = mlx5_ttc_get_fs_groups(use_l4_type, params->ipsec_rss);

	WARN_ON_ONCE(params->ft_attr.max_fte);
	params->ft_attr.max_fte = mlx5_fs_ttc_table_size(ttc->groups);
	ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
	if (IS_ERR(ttc->t)) {
		err = PTR_ERR(ttc->t);
		kvfree(ttc);
		return ERR_PTR(err);
	}

	err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
	if (err)
		goto destroy_ft;

	err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
	if (err)
		goto destroy_ft;

	ttc->mdev = dev;
	mutex_init(&ttc->mutex);

	return ttc;

destroy_ft:
	mlx5_destroy_ttc_table(ttc);
	return ERR_PTR(err);
}
923
/* Repoint the rule for traffic @type at @new_dest. */
int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type,
		      struct mlx5_flow_destination *new_dest)
{
	return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest,
					    NULL);
}
930
/* Return (by value) the default destination recorded when the rule for
 * @type was installed.  Warns if that destination was never set up.
 */
struct mlx5_flow_destination
mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc,
			  enum mlx5_traffic_types type)
{
	struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest;

	WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR,
		  "TTC[%d] default dest is not setup yet", type);

	return *dest;
}
942
mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table * ttc,enum mlx5_traffic_types type)943 int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc,
944 enum mlx5_traffic_types type)
945 {
946 struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type);
947
948 return mlx5_ttc_fwd_dest(ttc, type, &dest);
949 }
950
_mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table * ttc)951 static void _mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
952 {
953 enum mlx5_traffic_types i;
954
955 for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
956 i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
957 if (!ttc->rules[i].rule)
958 continue;
959
960 mlx5_del_flow_rules(ttc->rules[i].rule);
961 ttc->rules[i].rule = NULL;
962 }
963 }
964
/* Drop one reference on the decrypted-ESP rules; when the count reaches
 * zero, remove them.  No-op for tables built without ESP groups.
 */
void mlx5_ttc_destroy_ipsec_rules(struct mlx5_ttc_table *ttc)
{
	if (!mlx5_ttc_has_esp_flow_group(ttc))
		return;

	mutex_lock(&ttc->mutex);
	if (--ttc->refcnt)
		goto unlock;

	_mlx5_ttc_destroy_ipsec_rules(ttc);
unlock:
	mutex_unlock(&ttc->mutex);
}
978
/* For a decrypted-ESP traffic type, derive the EtherType, the
 * l4_type_ext match value, and the ordinary traffic type whose default
 * (TIR) destination the new rule should forward to.  Returns -EINVAL for
 * any other traffic type.
 */
static int mlx5_ttc_get_tt_attrs(enum mlx5_traffic_types type,
				 u16 *etype, int *l4_type_ext,
				 enum mlx5_traffic_types *tir_tt)
{
	switch (type) {
	case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP:
	case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP:
		*etype = ETH_P_IP;
		*l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
		*tir_tt = MLX5_TT_IPV4_TCP;
		break;
	case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_TCP:
	case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_TCP:
		*etype = ETH_P_IPV6;
		*l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_TCP;
		*tir_tt = MLX5_TT_IPV6_TCP;
		break;
	case MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_UDP:
	case MLX5_TT_DECRYPTED_ESP_INNER_IPV4_UDP:
		*etype = ETH_P_IP;
		*l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
		*tir_tt = MLX5_TT_IPV4_UDP;
		break;
	case MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP:
	case MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP:
		*etype = ETH_P_IPV6;
		*l4_type_ext = MLX5_PACKET_L4_TYPE_EXT_UDP;
		*tir_tt = MLX5_TT_IPV6_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
1014
1015 static struct mlx5_flow_handle *
mlx5_ttc_create_ipsec_outer_rule(struct mlx5_ttc_table * ttc,enum mlx5_traffic_types type)1016 mlx5_ttc_create_ipsec_outer_rule(struct mlx5_ttc_table *ttc,
1017 enum mlx5_traffic_types type)
1018 {
1019 struct mlx5_flow_destination dest;
1020 MLX5_DECLARE_FLOW_ACT(flow_act);
1021 enum mlx5_traffic_types tir_tt;
1022 struct mlx5_flow_handle *rule;
1023 struct mlx5_flow_spec *spec;
1024 int l4_type_ext;
1025 u16 etype;
1026 int err;
1027
1028 err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
1029 if (err)
1030 return ERR_PTR(err);
1031
1032 spec = kvzalloc_obj(*spec);
1033 if (!spec)
1034 return ERR_PTR(-ENOMEM);
1035
1036 mlx5_fs_ttc_set_match_ipv_outer(ttc->mdev, spec, etype);
1037
1038 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1039 outer_headers.l4_type_ext);
1040 MLX5_SET(fte_match_param, spec->match_value,
1041 outer_headers.l4_type_ext, l4_type_ext);
1042
1043 dest = mlx5_ttc_get_default_dest(ttc, tir_tt);
1044
1045 rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
1046 if (IS_ERR(rule)) {
1047 err = PTR_ERR(rule);
1048 mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
1049 }
1050
1051 kvfree(spec);
1052 return err ? ERR_PTR(err) : rule;
1053 }
1054
1055 static struct mlx5_flow_handle *
mlx5_ttc_create_ipsec_inner_rule(struct mlx5_ttc_table * ttc,struct mlx5_ttc_table * inner_ttc,enum mlx5_traffic_types type)1056 mlx5_ttc_create_ipsec_inner_rule(struct mlx5_ttc_table *ttc,
1057 struct mlx5_ttc_table *inner_ttc,
1058 enum mlx5_traffic_types type)
1059 {
1060 struct mlx5_flow_destination dest;
1061 MLX5_DECLARE_FLOW_ACT(flow_act);
1062 enum mlx5_traffic_types tir_tt;
1063 struct mlx5_flow_handle *rule;
1064 struct mlx5_flow_spec *spec;
1065 int l4_type_ext;
1066 u16 etype;
1067 int err;
1068
1069 err = mlx5_ttc_get_tt_attrs(type, &etype, &l4_type_ext, &tir_tt);
1070 if (err)
1071 return ERR_PTR(err);
1072
1073 spec = kvzalloc_obj(*spec);
1074 if (!spec)
1075 return ERR_PTR(-ENOMEM);
1076
1077 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1078 inner_headers.ip_version);
1079 MLX5_SET(fte_match_param, spec->match_value,
1080 inner_headers.ip_version, mlx5_etype_to_ipv(etype));
1081 MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
1082 inner_headers.l4_type_ext);
1083 MLX5_SET(fte_match_param, spec->match_value,
1084 inner_headers.l4_type_ext, l4_type_ext);
1085
1086 dest = mlx5_ttc_get_default_dest(inner_ttc, tir_tt);
1087
1088 spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
1089
1090 rule = mlx5_add_flow_rules(ttc->t, spec, &flow_act, &dest, 1);
1091 if (IS_ERR(rule)) {
1092 err = PTR_ERR(rule);
1093 mlx5_core_err(ttc->mdev, "%s: add rule failed\n", __func__);
1094 }
1095
1096 kvfree(spec);
1097 return err ? ERR_PTR(err) : rule;
1098 }
1099
/* Install the decrypted-ESP steering rules (outer-header range, then
 * inner-header range) the first time this is called; later calls just
 * take another reference.  No-op (success) for tables built without ESP
 * groups.  On failure, all ipsec rules installed so far are removed and
 * the refcount is left unchanged.
 */
int mlx5_ttc_create_ipsec_rules(struct mlx5_ttc_table *ttc,
				struct mlx5_ttc_table *inner_ttc)
{
	struct mlx5_flow_handle *rule;
	enum mlx5_traffic_types i;

	if (!mlx5_ttc_has_esp_flow_group(ttc))
		return 0;

	mutex_lock(&ttc->mutex);
	if (ttc->refcnt)
		goto skip;

	for (i = MLX5_TT_DECRYPTED_ESP_OUTER_IPV4_TCP;
	     i <= MLX5_TT_DECRYPTED_ESP_OUTER_IPV6_UDP; i++) {
		rule = mlx5_ttc_create_ipsec_outer_rule(ttc, i);
		if (IS_ERR(rule))
			goto err_out;

		ttc->rules[i].rule = rule;
	}

	for (i = MLX5_TT_DECRYPTED_ESP_INNER_IPV4_TCP;
	     i <= MLX5_TT_DECRYPTED_ESP_INNER_IPV6_UDP; i++) {
		rule = mlx5_ttc_create_ipsec_inner_rule(ttc, inner_ttc, i);
		if (IS_ERR(rule))
			goto err_out;

		ttc->rules[i].rule = rule;
	}

skip:
	ttc->refcnt++;
	mutex_unlock(&ttc->mutex);
	return 0;

err_out:
	_mlx5_ttc_destroy_ipsec_rules(ttc);
	mutex_unlock(&ttc->mutex);
	return PTR_ERR(rule);
}
1141