1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
3
4 #include "fs_core.h"
5 #include "fs_cmd.h"
6 #include "en.h"
7 #include "lib/ipsec_fs_roce.h"
8 #include "mlx5_core.h"
9 #include <linux/random.h>
10
/* Flow group + catch-all rule pair implementing a table's miss path. */
struct mlx5_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};
15
/* RX steering state for one address family (IPv4 or IPv6).
 * The nic_master_*, goto_alias_ft, alias_id and key members are only
 * populated on a multi-port-vhca (MPV) slave, where RoCE traffic is
 * redirected to the master device through an aliased flow table.
 */
struct mlx5_ipsec_rx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_ipsec_miss roce_miss;
	struct mlx5_flow_table *nic_master_ft;		/* MPV: table on master NIC */
	struct mlx5_flow_group *nic_master_group;
	struct mlx5_flow_handle *nic_master_rule;
	struct mlx5_flow_table *goto_alias_ft;		/* MPV: local table chaining to alias */
	u32 alias_id;					/* MPV: alias object id */
	char key[ACCESS_KEY_LEN];			/* MPV: cross-vhca access key */

	struct mlx5_flow_table *ft_rdma;
	struct mlx5_flow_namespace *ns_rdma;
};
31
/* TX steering state; goto_alias_ft/alias_id/key are MPV-slave only. */
struct mlx5_ipsec_tx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *goto_alias_ft;		/* MPV: local table chaining to alias */
	u32 alias_id;					/* MPV: alias object id */
	char key[ACCESS_KEY_LEN];			/* MPV: cross-vhca access key */
	struct mlx5_flow_namespace *ns;
};
41
/* Top-level RoCE IPsec steering context: per-family RX plus TX state. */
struct mlx5_ipsec_fs {
	struct mlx5_ipsec_rx_roce ipv4_rx;
	struct mlx5_ipsec_rx_roce ipv6_rx;
	struct mlx5_ipsec_tx_roce tx;
	struct mlx5_devcom_comp_dev **devcom;	/* handle for reaching the peer device (MPV) */
};
48
/* Restrict @spec to UDP packets whose destination port is @dport
 * (callers pass ROCE_V2_UDP_DPORT to match RoCE v2 traffic).
 */
static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
					  u16 dport)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}
58
ipsec_fs_create_alias_supported_one(struct mlx5_core_dev * mdev)59 static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev)
60 {
61 u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access);
62 u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported);
63
64 if (!(obj_supp &
65 MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS))
66 return false;
67
68 if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
69 return false;
70
71 return true;
72 }
73
ipsec_fs_create_alias_supported(struct mlx5_core_dev * mdev,struct mlx5_core_dev * master_mdev)74 static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev,
75 struct mlx5_core_dev *master_mdev)
76 {
77 if (ipsec_fs_create_alias_supported_one(mdev) &&
78 ipsec_fs_create_alias_supported_one(master_mdev))
79 return true;
80
81 return false;
82 }
83
/* Create an alias flow-table object on @ibv_allowed referring to @ft,
 * which is owned by @ibv_owner.
 *
 * Unless @from_event is set (meaning the key was already generated and
 * the ALLOW command was already issued in an earlier flow), a random
 * access key is generated into @alias_key and @ibv_owner is instructed
 * to permit cross-vhca access to the table. On success the id of the
 * newly created alias object is returned through @obj_id.
 *
 * Returns 0 on success, -EOPNOTSUPP if either device lacks aliasing
 * support, or a negative errno from the firmware commands.
 */
static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner,
				      struct mlx5_core_dev *ibv_allowed,
				      struct mlx5_flow_table *ft,
				      u32 *obj_id, char *alias_key, bool from_event)
{
	u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
	u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id);
	struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
	struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
	int ret;
	int i;

	if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed))
		return -EOPNOTSUPP;

	/* Hoisted out of the loop: from_event is loop-invariant. Only
	 * generate a fresh key when this is not replaying a peer event.
	 */
	if (!from_event) {
		for (i = 0; i < ACCESS_KEY_LEN; i++)
			alias_key[i] = get_random_u64() & 0xFF;
	}

	memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
	allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
	allow_attr.obj_id = aliased_object_id;

	if (!from_event) {
		ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr);
		if (ret) {
			mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n",
				      ret);
			return ret;
		}
	}

	memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
	alias_attr.obj_id = aliased_object_id;
	alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
	alias_attr.vhca_id = vhca_id_to_be_accessed;
	ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id);
	if (ret) {
		mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n",
			      ret);
		return ret;
	}

	return 0;
}
129
/* Install the RX steering rules for one address family:
 *
 *  1) Match RoCE v2 traffic (UDP, ROCE_V2_UDP_DPORT) and forward it
 *     towards the RDMA_RX table -- directly on a plain device, or via
 *     the goto-alias table on an MPV slave.
 *  2) A catch-all miss rule forwarding everything else to @default_dst.
 *  3) On an MPV slave only: a rule on the master's NIC table that
 *     forwards into the master's RDMA_RX table.
 */
static int
ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
			    struct mlx5_flow_destination *default_dst,
			    struct mlx5_ipsec_rx_roce *roce)
{
	bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
	struct mlx5_flow_destination dst = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	if (is_mpv_slave) {
		dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dst.ft = roce->goto_alias_ft;
	} else {
		dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
		dst.ft = roce->ft_rdma;
	}
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
			      err);
		goto out;
	}

	roce->rule = rule;

	/* Miss rule: a zeroed spec matches everything not caught above. */
	memset(spec, 0, sizeof(*spec));
	/* The default destination may sit at a lower flow level than
	 * this table, so the level check must be bypassed for it.
	 */
	if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX RoCE IPsec miss rule err=%d\n",
			      err);
		goto fail_add_default_rule;
	}

	roce->roce_miss.rule = rule;

	if (!is_mpv_slave)
		goto out;

	/* Slave only: forward from the master's NIC table into the
	 * master's RDMA_RX table. flow_act is reused, so undo the
	 * miss-rule-only IGNORE_FLOW_LEVEL flag first.
	 */
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	if (default_dst->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
		flow_act.flags &= ~FLOW_ACT_IGNORE_FLOW_LEVEL;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->ft_rdma;
	rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
				   1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n",
			      err);
		goto fail_add_nic_master_rule;
	}
	roce->nic_master_rule = rule;

	kvfree(spec);
	return 0;

fail_add_nic_master_rule:
	mlx5_del_flow_rules(roce->roce_miss.rule);
fail_add_default_rule:
	mlx5_del_flow_rules(roce->rule);
out:
	kvfree(spec);
	return err;
}
208
ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev * mdev,struct mlx5_ipsec_tx_roce * roce,struct mlx5_flow_table * pol_ft)209 static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev,
210 struct mlx5_ipsec_tx_roce *roce,
211 struct mlx5_flow_table *pol_ft)
212 {
213 struct mlx5_flow_destination dst = {};
214 MLX5_DECLARE_FLOW_ACT(flow_act);
215 struct mlx5_flow_handle *rule;
216 int err = 0;
217
218 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
219 dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
220 dst.ft = pol_ft;
221 rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst,
222 1);
223 if (IS_ERR(rule)) {
224 err = PTR_ERR(rule);
225 mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
226 err);
227 goto out;
228 }
229 roce->rule = rule;
230
231 out:
232 return err;
233 }
234
/* MPV-slave TX rule: match traffic originating from this device's
 * native port (source_vhca_port) and forward it through the goto-alias
 * table towards the master's policy table @pol_ft.
 */
static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev,
					   struct mlx5_ipsec_tx_roce *roce,
					   struct mlx5_flow_table *pol_ft)
{
	struct mlx5_flow_destination dst = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc_obj(*spec);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port,
		 MLX5_CAP_GEN(mdev, native_port_num));

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->goto_alias_ft;
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
			      err);
		goto out;
	}
	roce->rule = rule;

	/* No need for miss rule, since on miss we go to next PRIO, in which
	 * if master is configured, he will catch the traffic to go to his
	 * encryption table.
	 */

out:
	kvfree(spec);
	return err;
}
275
276 #define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
277 #define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0
278 #define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* Since last used level in NIC ipsec is 2 */
279
/* MPV-slave TX table setup:
 *  - alias the master's policy table @pol_ft so the slave can reach it,
 *  - create an unmanaged "goto alias" table in the master's EGRESS_IPSEC
 *    namespace whose miss path is the alias object,
 *  - create the slave-facing TX table in the master's RDMA_TX namespace.
 */
static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev,
					  struct mlx5_ipsec_tx_roce *roce,
					  struct mlx5_flow_table *pol_ft,
					  struct mlx5e_priv *peer_priv,
					  bool from_event)
{
	struct mlx5_flow_namespace *roce_ns, *nic_ns;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table next_ft;
	struct mlx5_flow_table *ft;
	int err;

	roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
	if (!roce_ns)
		return -EOPNOTSUPP;

	nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!nic_ns)
		return -EOPNOTSUPP;

	err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key,
					 from_event);
	if (err)
		return err;

	/* next_ft is consumed only for its id: the unmanaged table's miss
	 * destination is the alias object created above.
	 */
	next_ft.id = roce->alias_id;
	ft_attr.max_fte = 1;
	ft_attr.next_ft = &next_ft;
	ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL;
	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
	ft = mlx5_create_flow_table(nic_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err);
		goto destroy_alias;
	}

	roce->goto_alias_ft = ft;

	/* Reuse ft_attr for the RDMA_TX table; reset it first. */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.max_fte = 1;
	ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
	ft = mlx5_create_flow_table(roce_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
		goto destroy_alias_ft;
	}

	roce->ft = ft;

	return 0;

destroy_alias_ft:
	mlx5_destroy_flow_table(roce->goto_alias_ft);
destroy_alias:
	mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
	return err;
}
340
/* Create the single flow group (matching on source_vhca_port) in the
 * MPV-slave TX table and install its forwarding rule. @in is a
 * caller-provided, zeroed create_flow_group_in buffer.
 */
static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev,
						   struct mlx5_ipsec_tx_roce *roce,
						   struct mlx5_flow_table *pol_ft,
						   u32 *in)
{
	struct mlx5_flow_group *g;
	int ix = 0;
	int err;
	u8 *mc;

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(roce->ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
		return err;
	}
	roce->g = g;

	err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
		goto destroy_group;
	}

	return 0;

destroy_group:
	mlx5_destroy_flow_group(roce->g);
	return err;
}
378
/* Full MPV-slave TX bring-up: locate the master (peer) device via
 * devcom, create the alias/goto/TX tables on it, then the group and
 * rules. The devcom peer lock is held across the whole setup.
 */
static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
				       struct mlx5_ipsec_fs *ipsec_roce,
				       struct mlx5_flow_table *pol_ft,
				       u32 *in, bool from_event)
{
	struct mlx5_devcom_comp_dev *tmp = NULL;
	struct mlx5_ipsec_tx_roce *roce;
	struct mlx5e_priv *peer_priv;
	int err;

	if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
		return -EOPNOTSUPP;

	peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
	if (!peer_priv || !peer_priv->ipsec) {
		mlx5_core_err(mdev, "IPsec not supported on master device\n");
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	roce = &ipsec_roce->tx;

	err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv, from_event);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err);
		goto release_peer;
	}

	err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err);
		goto destroy_tables;
	}

	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return 0;

destroy_tables:
	mlx5_destroy_flow_table(roce->ft);
	mlx5_destroy_flow_table(roce->goto_alias_ft);
	mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
release_peer:
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return err;
}
425
/* Tear down the MPV-slave-only RX resources in reverse creation order:
 * goto-alias table, alias object, then master NIC group and table.
 */
static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce)
{
	mlx5_destroy_flow_table(roce->goto_alias_ft);
	mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
	mlx5_destroy_flow_group(roce->nic_master_group);
	mlx5_destroy_flow_table(roce->nic_master_ft);
}
434
435 #define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
436 #define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3
437 #define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2
438
/* MPV-slave RX bring-up for one address family:
 *  - RDMA_RX table on the master (@roce->ft_rdma),
 *  - NIC-kernel table + catch-all group on the master,
 *  - alias object exposing the master NIC table to this device,
 *  - local unmanaged "goto alias" table whose miss path is the alias.
 * The devcom peer lock is held across the whole setup.
 */
static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
				       struct mlx5_ipsec_fs *ipsec_roce,
				       struct mlx5_flow_namespace *ns,
				       u32 family, u32 level, u32 prio)
{
	struct mlx5_flow_namespace *roce_ns, *nic_ns;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_devcom_comp_dev *tmp = NULL;
	struct mlx5_ipsec_rx_roce *roce;
	struct mlx5_flow_table next_ft;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	struct mlx5e_priv *peer_priv;
	int ix = 0;
	u32 *in;
	int err;

	roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
				     &ipsec_roce->ipv6_rx;

	if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
		return -EOPNOTSUPP;

	peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
	if (!peer_priv || !peer_priv->ipsec) {
		mlx5_core_err(mdev, "IPsec not supported on master device\n");
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
	if (!roce_ns) {
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!nic_ns) {
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto release_peer;
	}

	/* RDMA_RX table on the master device */
	ft_attr.level = (family == AF_INET) ? MLX5_IPSEC_RX_IPV4_FT_LEVEL :
					      MLX5_IPSEC_RX_IPV6_FT_LEVEL;
	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(roce_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err);
		goto free_in;
	}

	roce->ft_rdma = ft;

	/* Landing table in the master's NIC kernel namespace; level + 2
	 * places it above the slave-facing tables at this prio.
	 */
	ft_attr.max_fte = 1;
	ft_attr.prio = prio;
	ft_attr.level = level + 2;
	ft = mlx5_create_flow_table(nic_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err);
		goto destroy_ft_rdma;
	}
	roce->nic_master_ft = ft;

	/* Single catch-all group (no match criteria) with one entry */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(roce->nic_master_ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err);
		goto destroy_nic_master_ft;
	}
	roce->nic_master_group = g;

	/* Expose the master's NIC table to this (slave) device */
	err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft,
					 &roce->alias_id, roce->key, false);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err);
		goto destroy_group;
	}

	/* Local unmanaged table whose miss destination is the alias;
	 * next_ft is consumed only for its id.
	 */
	next_ft.id = roce->alias_id;
	ft_attr.max_fte = 1;
	ft_attr.prio = prio;
	ft_attr.level = roce->ft->level + 1;
	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
	ft_attr.next_ft = &next_ft;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err);
		goto destroy_alias;
	}
	roce->goto_alias_ft = ft;

	kvfree(in);
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return 0;

destroy_alias:
	mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
destroy_group:
	mlx5_destroy_flow_group(roce->nic_master_group);
destroy_nic_master_ft:
	mlx5_destroy_flow_table(roce->nic_master_ft);
destroy_ft_rdma:
	mlx5_destroy_flow_table(roce->ft_rdma);
free_in:
	kvfree(in);
release_peer:
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return err;
}
561
mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs * ipsec_roce,struct mlx5_core_dev * mdev)562 void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
563 struct mlx5_core_dev *mdev)
564 {
565 struct mlx5_devcom_comp_dev *tmp = NULL;
566 struct mlx5_ipsec_tx_roce *tx_roce;
567 struct mlx5e_priv *peer_priv;
568
569 if (!ipsec_roce)
570 return;
571
572 tx_roce = &ipsec_roce->tx;
573
574 if (!tx_roce->ft)
575 return; /* Incase RoCE was cleaned from MPV event flow */
576
577 mlx5_del_flow_rules(tx_roce->rule);
578 mlx5_destroy_flow_group(tx_roce->g);
579 mlx5_destroy_flow_table(tx_roce->ft);
580
581 if (!mlx5_core_is_mp_slave(mdev))
582 return;
583
584 if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
585 return;
586
587 peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
588 if (!peer_priv) {
589 mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
590 return;
591 }
592
593 mlx5_destroy_flow_table(tx_roce->goto_alias_ft);
594 mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id,
595 MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
596 mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
597 tx_roce->ft = NULL;
598 }
599
/* Create the TX RoCE IPsec steering objects.
 *
 * On an MPV slave everything is delegated to the mpv path (tables live
 * on the master device). Otherwise a one-entry table + group is created
 * in this device's RDMA_TX IPsec namespace with a rule forwarding into
 * the IPsec policy table @pol_ft.
 *
 * Returns 0 on success (also when @ipsec_roce is NULL, i.e. RoCE IPsec
 * is not in use) or a negative errno.
 */
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
				 struct mlx5_ipsec_fs *ipsec_roce,
				 struct mlx5_flow_table *pol_ft,
				 bool from_event)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_ipsec_tx_roce *roce;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	int ix = 0;
	int err;
	u32 *in;

	if (!ipsec_roce)
		return 0;

	roce = &ipsec_roce->tx;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	if (mlx5_core_is_mp_slave(mdev)) {
		err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event);
		goto free_in;
	}

	ft_attr.max_fte = 1;
	ft_attr.prio = 1;
	ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
	ft = mlx5_create_flow_table(roce->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
		goto free_in;
	}

	roce->ft = ft;

	/* Single group covering the whole (one-entry) table */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
		goto destroy_table;
	}
	roce->g = g;

	err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
		goto destroy_group;
	}

	kvfree(in);
	return 0;

destroy_group:
	mlx5_destroy_flow_group(roce->g);
destroy_table:
	mlx5_destroy_flow_table(ft);
free_in:
	kvfree(in);
	return err;
}
667
/* Return the RX RoCE table for @family (AF_INET or AF_INET6), or NULL
 * when RoCE IPsec is not initialized.
 */
struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
{
	if (!ipsec_roce)
		return NULL;

	if (family == AF_INET)
		return ipsec_roce->ipv4_rx.ft;

	return ipsec_roce->ipv6_rx.ft;
}
680
/* Tear down the RX RoCE IPsec steering state for @family, in reverse
 * creation order: rules first, then (on an MPV slave) the master-side
 * tables, then groups and tables. ft is cleared last as the re-entry
 * guard for the MPV event flow.
 */
void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family,
				   struct mlx5_core_dev *mdev)
{
	bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
	struct mlx5_ipsec_rx_roce *rx_roce;

	if (!ipsec_roce)
		return;

	rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
					&ipsec_roce->ipv6_rx;
	if (!rx_roce->ft)
		return; /* Incase RoCE was cleaned from MPV event flow */

	if (is_mpv_slave)
		mlx5_del_flow_rules(rx_roce->nic_master_rule);
	mlx5_del_flow_rules(rx_roce->roce_miss.rule);
	mlx5_del_flow_rules(rx_roce->rule);
	if (is_mpv_slave)
		roce_rx_mpv_destroy_tables(mdev, rx_roce);
	mlx5_destroy_flow_table(rx_roce->ft_rdma);
	mlx5_destroy_flow_group(rx_roce->roce_miss.group);
	mlx5_destroy_flow_group(rx_roce->g);
	mlx5_destroy_flow_table(rx_roce->ft);
	rx_roce->ft = NULL;
}
707
/* Create the RX RoCE IPsec steering objects for @family:
 *  - a two-entry NIC table in @ns (one RoCE-match rule + one miss rule),
 *  - a match group (UDP proto + dport) and a zero-criteria miss group,
 *  - the RDMA_RX destination table -- local on a plain device, or the
 *    full alias chain on an MPV slave,
 *  - the forwarding rules tying it together.
 *
 * Returns 0 on success (also when @ipsec_roce is NULL, i.e. RoCE IPsec
 * is not in use) or a negative errno.
 */
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
				 struct mlx5_ipsec_fs *ipsec_roce,
				 struct mlx5_flow_namespace *ns,
				 struct mlx5_flow_destination *default_dst,
				 u32 family, u32 level, u32 prio)
{
	bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_ipsec_rx_roce *roce;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	if (!ipsec_roce)
		return 0;

	roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
				     &ipsec_roce->ipv6_rx;

	/* Two entries: the RoCE-match rule and the miss rule */
	ft_attr.max_fte = 2;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at nic err=%d\n", err);
		return err;
	}

	roce->ft = ft;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto fail_nomem;
	}

	/* Group 0: match on IP protocol + UDP destination port */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_RX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group at nic err=%d\n", err);
		goto fail_group;
	}
	roce->g = g;

	/* Group 1: no criteria -- the miss group */
	memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_RX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx miss group at nic err=%d\n", err);
		goto fail_mgroup;
	}
	roce->roce_miss.group = g;

	if (is_mpv_slave) {
		err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio);
		if (err) {
			mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err);
			goto fail_mpv_create;
		}
	} else {
		memset(&ft_attr, 0, sizeof(ft_attr));
		/* IPv4 sits one level above IPv6 in the RDMA_RX namespace */
		if (family == AF_INET)
			ft_attr.level = 1;
		ft_attr.max_fte = 1;
		ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			mlx5_core_err(mdev,
				      "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
			goto fail_rdma_table;
		}

		roce->ft_rdma = ft;
	}

	err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
		goto fail_setup_rule;
	}

	kvfree(in);
	return 0;

fail_setup_rule:
	if (is_mpv_slave)
		roce_rx_mpv_destroy_tables(mdev, roce);
	mlx5_destroy_flow_table(roce->ft_rdma);
fail_mpv_create:
fail_rdma_table:
	mlx5_destroy_flow_group(roce->roce_miss.group);
fail_mgroup:
	mlx5_destroy_flow_group(roce->g);
fail_group:
	kvfree(in);
fail_nomem:
	mlx5_destroy_flow_table(roce->ft);
	return err;
}
824
mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev * mdev)825 bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev)
826 {
827 if (!mlx5_core_mp_enabled(mdev))
828 return true;
829
830 if (ipsec_fs_create_alias_supported_one(mdev))
831 return true;
832
833 return false;
834 }
835
/* Free the context allocated by mlx5_ipsec_fs_roce_init() (NULL-safe). */
void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
{
	kfree(ipsec_roce);
}
840
/* Allocate the RoCE IPsec steering context and resolve the RDMA RX/TX
 * IPsec flow namespaces. Returns NULL if either namespace is missing
 * (RoCE IPsec unsupported) or on allocation failure; callers treat
 * NULL as "feature disabled".
 */
struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
					      struct mlx5_devcom_comp_dev **devcom)
{
	struct mlx5_ipsec_fs *roce_ipsec;
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
	if (!ns) {
		mlx5_core_err(mdev, "Failed to get RoCE rx ns\n");
		return NULL;
	}

	roce_ipsec = kzalloc_obj(*roce_ipsec);
	if (!roce_ipsec)
		return NULL;

	/* Both address families share the same RDMA_RX namespace */
	roce_ipsec->ipv4_rx.ns_rdma = ns;
	roce_ipsec->ipv6_rx.ns_rdma = ns;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
	if (!ns) {
		mlx5_core_err(mdev, "Failed to get RoCE tx ns\n");
		goto err_tx;
	}

	roce_ipsec->tx.ns = ns;

	roce_ipsec->devcom = devcom;

	return roce_ipsec;

err_tx:
	kfree(roce_ipsec);
	return NULL;
}
876