// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */

#include "fs_core.h"
#include "fs_cmd.h"
#include "en.h"
#include "lib/ipsec_fs_roce.h"
#include "mlx5_core.h"
#include <linux/random.h>

struct mlx5_ipsec_miss {
	struct mlx5_flow_group *group;
	struct mlx5_flow_handle *rule;
};

struct mlx5_ipsec_rx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_ipsec_miss roce_miss;
	struct mlx5_flow_table *nic_master_ft;
	struct mlx5_flow_group *nic_master_group;
	struct mlx5_flow_handle *nic_master_rule;
	struct mlx5_flow_table *goto_alias_ft;
	u32 alias_id;
	char key[ACCESS_KEY_LEN];

	struct mlx5_flow_table *ft_rdma;
	struct mlx5_flow_namespace *ns_rdma;
};

struct mlx5_ipsec_tx_roce {
	struct mlx5_flow_group *g;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *goto_alias_ft;
	u32 alias_id;
	char key[ACCESS_KEY_LEN];
	struct mlx5_flow_namespace *ns;
};

struct mlx5_ipsec_fs {
	struct mlx5_ipsec_rx_roce ipv4_rx;
	struct mlx5_ipsec_rx_roce ipv6_rx;
	struct mlx5_ipsec_tx_roce tx;
	struct mlx5_devcom_comp_dev **devcom;
};

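/* Constrain @spec to outer-header UDP packets destined to @dport (the RoCE v2 port). */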
static void ipsec_fs_roce_setup_udp_dport(struct mlx5_flow_spec *spec,
					  u16 dport)
{
	spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
}

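/*
 * A flow table alias is usable only if the HCA supports chaining a local
 * flow table to a remote flow table on miss and allows its flow tables to
 * be accessed by another vHCA.
 */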
static bool ipsec_fs_create_alias_supported_one(struct mlx5_core_dev *mdev)
{
	u64 obj_allowed = MLX5_CAP_GEN_2_64(mdev, allowed_object_for_other_vhca_access);
	u32 obj_supp = MLX5_CAP_GEN_2(mdev, cross_vhca_object_to_object_supported);

	if (!(obj_supp &
	    MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS))
		return false;

	if (!(obj_allowed & MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE))
		return false;

	return true;
}

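/* Both the local device and the master device must support flow table aliases. */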
static bool ipsec_fs_create_alias_supported(struct mlx5_core_dev *mdev,
					    struct mlx5_core_dev *master_mdev)
{
	if (ipsec_fs_create_alias_supported_one(mdev) &&
	    ipsec_fs_create_alias_supported_one(master_mdev))
		return true;

	return false;
}

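/*
 * Create an alias object on @ibv_allowed that references @ft owned by
 * @ibv_owner.  A fresh random access key is generated and registered with
 * the owner unless this is a replay from the MPV event path (@from_event),
 * in which case the key already stored in @alias_key is reused.  The alias
 * object id is returned in @obj_id.
 */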
static int ipsec_fs_create_aliased_ft(struct mlx5_core_dev *ibv_owner,
				      struct mlx5_core_dev *ibv_allowed,
				      struct mlx5_flow_table *ft,
				      u32 *obj_id, char *alias_key, bool from_event)
{
	u32 aliased_object_id = (ft->type << FT_ID_FT_TYPE_OFFSET) | ft->id;
	u16 vhca_id_to_be_accessed = MLX5_CAP_GEN(ibv_owner, vhca_id);
	struct mlx5_cmd_allow_other_vhca_access_attr allow_attr = {};
	struct mlx5_cmd_alias_obj_create_attr alias_attr = {};
	int ret;
	int i;

	if (!ipsec_fs_create_alias_supported(ibv_owner, ibv_allowed))
		return -EOPNOTSUPP;

	for (i = 0; i < ACCESS_KEY_LEN; i++)
		if (!from_event)
			alias_key[i] = get_random_u64() & 0xFF;

	memcpy(allow_attr.access_key, alias_key, ACCESS_KEY_LEN);
	allow_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
	allow_attr.obj_id = aliased_object_id;

	if (!from_event) {
		ret = mlx5_cmd_allow_other_vhca_access(ibv_owner, &allow_attr);
		if (ret) {
			mlx5_core_err(ibv_owner, "Failed to allow other vhca access err=%d\n",
				      ret);
			return ret;
		}
	}

	memcpy(alias_attr.access_key, alias_key, ACCESS_KEY_LEN);
	alias_attr.obj_id = aliased_object_id;
	alias_attr.obj_type = MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS;
	alias_attr.vhca_id = vhca_id_to_be_accessed;
	ret = mlx5_cmd_alias_obj_create(ibv_allowed, &alias_attr, obj_id);
	if (ret) {
		mlx5_core_err(ibv_allowed, "Failed to create alias object err=%d\n",
			      ret);
		return ret;
	}

	return 0;
}

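/*
 * Add the RX steering rules: RoCE v2 UDP traffic is forwarded to the RDMA RX
 * IPsec table (directly, or through the goto-alias table on an MPV slave),
 * while everything else falls through to @default_dst.  On an MPV slave an
 * extra rule on the master's NIC table forwards into ft_rdma.
 */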
static int
ipsec_fs_roce_rx_rule_setup(struct mlx5_core_dev *mdev,
			    struct mlx5_flow_destination *default_dst,
			    struct mlx5_ipsec_rx_roce *roce)
{
	bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
	struct mlx5_flow_destination dst = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	ipsec_fs_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	if (is_mpv_slave) {
		dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dst.ft = roce->goto_alias_ft;
	} else {
		dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
		dst.ft = roce->ft_rdma;
	}
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule err=%d\n",
			      err);
		goto out;
	}

	roce->rule = rule;

	memset(spec, 0, sizeof(*spec));
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, default_dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX RoCE IPsec miss rule err=%d\n",
			      err);
		goto fail_add_default_rule;
	}

	roce->roce_miss.rule = rule;

	if (!is_mpv_slave)
		goto out;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->ft_rdma;
	rule = mlx5_add_flow_rules(roce->nic_master_ft, NULL, &flow_act, &dst,
				   1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add RX RoCE IPsec rule for alias err=%d\n",
			      err);
		goto fail_add_nic_master_rule;
	}
	roce->nic_master_rule = rule;

	kvfree(spec);
	return 0;

fail_add_nic_master_rule:
	mlx5_del_flow_rules(roce->roce_miss.rule);
fail_add_default_rule:
	mlx5_del_flow_rules(roce->rule);
out:
	kvfree(spec);
	return err;
}

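/* Forward all TX RoCE IPsec traffic to the NIC IPsec policy table @pol_ft. */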
static int ipsec_fs_roce_tx_rule_setup(struct mlx5_core_dev *mdev,
				       struct mlx5_ipsec_tx_roce *roce,
				       struct mlx5_flow_table *pol_ft)
{
	struct mlx5_flow_destination dst = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	int err = 0;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = pol_ft;
	rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, &dst,
				   1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
			      err);
		goto out;
	}
	roce->rule = rule;

out:
	return err;
}

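/*
 * MPV slave TX rule: traffic originating from this device's native vhca
 * port is forwarded to the goto-alias table, which chains to the slave's
 * policy table through the flow table alias.
 */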
static int ipsec_fs_roce_tx_mpv_rule_setup(struct mlx5_core_dev *mdev,
					   struct mlx5_ipsec_tx_roce *roce,
					   struct mlx5_flow_table *pol_ft)
{
	struct mlx5_flow_destination dst = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.source_vhca_port);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters.source_vhca_port,
		 MLX5_CAP_GEN(mdev, native_port_num));

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
	dst.ft = roce->goto_alias_ft;
	rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		mlx5_core_err(mdev, "Fail to add TX RoCE IPsec rule err=%d\n",
			      err);
		goto out;
	}
	roce->rule = rule;

	/* No need for a miss rule: on miss the packet goes to the next prio,
	 * where the master, if configured, catches the traffic and sends it
	 * to its encryption table.
	 */

out:
	kvfree(spec);
	return err;
}

#define MLX5_TX_ROCE_GROUP_SIZE BIT(0)
#define MLX5_IPSEC_RDMA_TX_FT_LEVEL 0
#define MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL 3 /* The last level used in NIC IPsec is 2 */

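/*
 * MPV slave TX setup: expose this device's policy table to the master via a
 * flow table alias, create a goto-alias table in the master's NIC egress
 * IPsec namespace whose miss path leads to that alias, and create the RDMA
 * TX table in the master's RDMA_TX IPsec namespace.
 */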
static int ipsec_fs_roce_tx_mpv_create_ft(struct mlx5_core_dev *mdev,
					  struct mlx5_ipsec_tx_roce *roce,
					  struct mlx5_flow_table *pol_ft,
					  struct mlx5e_priv *peer_priv,
					  bool from_event)
{
	struct mlx5_flow_namespace *roce_ns, *nic_ns;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_table next_ft;
	struct mlx5_flow_table *ft;
	int err;

	roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
	if (!roce_ns)
		return -EOPNOTSUPP;

	nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
	if (!nic_ns)
		return -EOPNOTSUPP;

	err = ipsec_fs_create_aliased_ft(mdev, peer_priv->mdev, pol_ft, &roce->alias_id, roce->key,
					 from_event);
	if (err)
		return err;

	next_ft.id = roce->alias_id;
	ft_attr.max_fte = 1;
	ft_attr.next_ft = &next_ft;
	ft_attr.level = MLX5_IPSEC_NIC_GOTO_ALIAS_FT_LEVEL;
	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
	ft = mlx5_create_flow_table(nic_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec goto alias ft err=%d\n", err);
		goto destroy_alias;
	}

	roce->goto_alias_ft = ft;

	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.max_fte = 1;
	ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
	ft = mlx5_create_flow_table(roce_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
		goto destroy_alias_ft;
	}

	roce->ft = ft;

	return 0;

destroy_alias_ft:
	mlx5_destroy_flow_table(roce->goto_alias_ft);
destroy_alias:
	mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
	return err;
}

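/* Create the single TX flow group matching on source_vhca_port and install the MPV TX rule. */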
static int ipsec_fs_roce_tx_mpv_create_group_rules(struct mlx5_core_dev *mdev,
						   struct mlx5_ipsec_tx_roce *roce,
						   struct mlx5_flow_table *pol_ft,
						   u32 *in)
{
	struct mlx5_flow_group *g;
	int ix = 0;
	int err;
	u8 *mc;

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, misc_parameters.source_vhca_port);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS);

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(roce->ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
		return err;
	}
	roce->g = g;

	err = ipsec_fs_roce_tx_mpv_rule_setup(mdev, roce, pol_ft);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
		goto destroy_group;
	}

	return 0;

destroy_group:
	mlx5_destroy_flow_group(roce->g);
	return err;
}

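/*
 * Full MPV slave TX path: look up the master device through devcom, build
 * the aliased tables on it, then create the TX group and rules.
 */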
static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
				       struct mlx5_ipsec_fs *ipsec_roce,
				       struct mlx5_flow_table *pol_ft,
				       u32 *in, bool from_event)
{
	struct mlx5_devcom_comp_dev *tmp = NULL;
	struct mlx5_ipsec_tx_roce *roce;
	struct mlx5e_priv *peer_priv;
	int err;

	if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
		return -EOPNOTSUPP;

	peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
	if (!peer_priv || !peer_priv->ipsec) {
		mlx5_core_err(mdev, "IPsec not supported on master device\n");
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	roce = &ipsec_roce->tx;

	err = ipsec_fs_roce_tx_mpv_create_ft(mdev, roce, pol_ft, peer_priv, from_event);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tables err=%d\n", err);
		goto release_peer;
	}

	err = ipsec_fs_roce_tx_mpv_create_group_rules(mdev, roce, pol_ft, in);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group/rule err=%d\n", err);
		goto destroy_tables;
	}

	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return 0;

destroy_tables:
	mlx5_destroy_flow_table(roce->ft);
	mlx5_destroy_flow_table(roce->goto_alias_ft);
	mlx5_cmd_alias_obj_destroy(peer_priv->mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
release_peer:
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return err;
}

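/* Tear down the MPV slave RX objects in reverse order of creation. */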
static void roce_rx_mpv_destroy_tables(struct mlx5_core_dev *mdev, struct mlx5_ipsec_rx_roce *roce)
{
	mlx5_destroy_flow_table(roce->goto_alias_ft);
	mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
	mlx5_destroy_flow_group(roce->nic_master_group);
	mlx5_destroy_flow_table(roce->nic_master_ft);
}

#define MLX5_RX_ROCE_GROUP_SIZE BIT(0)
#define MLX5_IPSEC_RX_IPV4_FT_LEVEL 3
#define MLX5_IPSEC_RX_IPV6_FT_LEVEL 2

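/*
 * MPV slave RX setup: create the RDMA RX table and a NIC RX table on the
 * master, alias that NIC table back to this device, and create a local
 * goto-alias table whose miss path leads to the alias.
 */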
static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
				       struct mlx5_ipsec_fs *ipsec_roce,
				       struct mlx5_flow_namespace *ns,
				       u32 family, u32 level, u32 prio)
{
	struct mlx5_flow_namespace *roce_ns, *nic_ns;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_devcom_comp_dev *tmp = NULL;
	struct mlx5_ipsec_rx_roce *roce;
	struct mlx5_flow_table next_ft;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	struct mlx5e_priv *peer_priv;
	int ix = 0;
	u32 *in;
	int err;

	roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
				     &ipsec_roce->ipv6_rx;

	if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
		return -EOPNOTSUPP;

	peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
	if (!peer_priv || !peer_priv->ipsec) {
		mlx5_core_err(mdev, "IPsec not supported on master device\n");
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	roce_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
	if (!roce_ns) {
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	nic_ns = mlx5_get_flow_namespace(peer_priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
	if (!nic_ns) {
		err = -EOPNOTSUPP;
		goto release_peer;
	}

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto release_peer;
	}

	ft_attr.level = (family == AF_INET) ? MLX5_IPSEC_RX_IPV4_FT_LEVEL :
					      MLX5_IPSEC_RX_IPV6_FT_LEVEL;
	ft_attr.max_fte = 1;
	ft = mlx5_create_flow_table(roce_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at rdma master err=%d\n", err);
		goto free_in;
	}

	roce->ft_rdma = ft;

	ft_attr.max_fte = 1;
	ft_attr.prio = prio;
	ft_attr.level = level + 2;
	ft = mlx5_create_flow_table(nic_ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC master err=%d\n", err);
		goto destroy_ft_rdma;
	}
	roce->nic_master_ft = ft;

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += 1;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(roce->nic_master_ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group aliased err=%d\n", err);
		goto destroy_nic_master_ft;
	}
	roce->nic_master_group = g;

	err = ipsec_fs_create_aliased_ft(peer_priv->mdev, mdev, roce->nic_master_ft,
					 &roce->alias_id, roce->key, false);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias FT err=%d\n", err);
		goto destroy_group;
	}

	next_ft.id = roce->alias_id;
	ft_attr.max_fte = 1;
	ft_attr.prio = prio;
	ft_attr.level = roce->ft->level + 1;
	ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
	ft_attr.next_ft = &next_ft;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at NIC slave err=%d\n", err);
		goto destroy_alias;
	}
	roce->goto_alias_ft = ft;

	kvfree(in);
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return 0;

destroy_alias:
	mlx5_cmd_alias_obj_destroy(mdev, roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
destroy_group:
	mlx5_destroy_flow_group(roce->nic_master_group);
destroy_nic_master_ft:
	mlx5_destroy_flow_table(roce->nic_master_ft);
destroy_ft_rdma:
	mlx5_destroy_flow_table(roce->ft_rdma);
free_in:
	kvfree(in);
release_peer:
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	return err;
}

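/*
 * Destroy the TX RoCE IPsec steering objects.  On an MPV slave this also
 * removes the goto-alias table and the alias object that live on the
 * master device.
 */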
void mlx5_ipsec_fs_roce_tx_destroy(struct mlx5_ipsec_fs *ipsec_roce,
				   struct mlx5_core_dev *mdev)
{
	struct mlx5_devcom_comp_dev *tmp = NULL;
	struct mlx5_ipsec_tx_roce *tx_roce;
	struct mlx5e_priv *peer_priv;

	if (!ipsec_roce)
		return;

	tx_roce = &ipsec_roce->tx;

	if (!tx_roce->ft)
		return; /* In case RoCE was already cleaned up from the MPV event flow */

	mlx5_del_flow_rules(tx_roce->rule);
	mlx5_destroy_flow_group(tx_roce->g);
	mlx5_destroy_flow_table(tx_roce->ft);

	if (!mlx5_core_is_mp_slave(mdev))
		return;

	if (!mlx5_devcom_for_each_peer_begin(*ipsec_roce->devcom))
		return;

	peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
	if (!peer_priv) {
		mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
		return;
	}

	mlx5_destroy_flow_table(tx_roce->goto_alias_ft);
	mlx5_cmd_alias_obj_destroy(peer_priv->mdev, tx_roce->alias_id,
				   MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS);
	mlx5_devcom_for_each_peer_end(*ipsec_roce->devcom);
	tx_roce->ft = NULL;
}

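/*
 * Create the TX RoCE IPsec table, group and rule that steer RDMA TX traffic
 * into the NIC IPsec policy table @pol_ft.  MPV slaves take the aliased
 * path instead.
 */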
int mlx5_ipsec_fs_roce_tx_create(struct mlx5_core_dev *mdev,
				 struct mlx5_ipsec_fs *ipsec_roce,
				 struct mlx5_flow_table *pol_ft,
				 bool from_event)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_ipsec_tx_roce *roce;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	int ix = 0;
	int err;
	u32 *in;

	if (!ipsec_roce)
		return 0;

	roce = &ipsec_roce->tx;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	if (mlx5_core_is_mp_slave(mdev)) {
		err = ipsec_fs_roce_tx_mpv_create(mdev, ipsec_roce, pol_ft, in, from_event);
		goto free_in;
	}

	ft_attr.max_fte = 1;
	ft_attr.prio = 1;
	ft_attr.level = MLX5_IPSEC_RDMA_TX_FT_LEVEL;
	ft = mlx5_create_flow_table(roce->ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx ft err=%d\n", err);
		goto free_in;
	}

	roce->ft = ft;

	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_TX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx group err=%d\n", err);
		goto destroy_table;
	}
	roce->g = g;

	err = ipsec_fs_roce_tx_rule_setup(mdev, roce, pol_ft);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
		goto destroy_group;
	}

	kvfree(in);
	return 0;

destroy_group:
	mlx5_destroy_flow_group(roce->g);
destroy_table:
	mlx5_destroy_flow_table(ft);
free_in:
	kvfree(in);
	return err;
}

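/* Return the per-family RX RoCE IPsec table, or NULL if RoCE IPsec is not set up. */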
struct mlx5_flow_table *mlx5_ipsec_fs_roce_ft_get(struct mlx5_ipsec_fs *ipsec_roce, u32 family)
{
	struct mlx5_ipsec_rx_roce *rx_roce;

	if (!ipsec_roce)
		return NULL;

	rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
					&ipsec_roce->ipv6_rx;

	return rx_roce->ft;
}

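/*
 * Destroy the per-family RX RoCE IPsec steering objects, including the MPV
 * slave tables when applicable.
 */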
void mlx5_ipsec_fs_roce_rx_destroy(struct mlx5_ipsec_fs *ipsec_roce, u32 family,
				   struct mlx5_core_dev *mdev)
{
	bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
	struct mlx5_ipsec_rx_roce *rx_roce;

	if (!ipsec_roce)
		return;

	rx_roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
					&ipsec_roce->ipv6_rx;
	if (!rx_roce->ft)
		return; /* In case RoCE was already cleaned up from the MPV event flow */

	if (is_mpv_slave)
		mlx5_del_flow_rules(rx_roce->nic_master_rule);
	mlx5_del_flow_rules(rx_roce->roce_miss.rule);
	mlx5_del_flow_rules(rx_roce->rule);
	if (is_mpv_slave)
		roce_rx_mpv_destroy_tables(mdev, rx_roce);
	mlx5_destroy_flow_table(rx_roce->ft_rdma);
	mlx5_destroy_flow_group(rx_roce->roce_miss.group);
	mlx5_destroy_flow_group(rx_roce->g);
	mlx5_destroy_flow_table(rx_roce->ft);
	rx_roce->ft = NULL;
}

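/*
 * Create the per-family RX RoCE IPsec steering: a two-entry NIC table with
 * a match group for RoCE v2 UDP traffic and a miss group, the RDMA RX table
 * (local, or on the MPV master), and the rules that connect them.
 */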
int mlx5_ipsec_fs_roce_rx_create(struct mlx5_core_dev *mdev,
				 struct mlx5_ipsec_fs *ipsec_roce,
				 struct mlx5_flow_namespace *ns,
				 struct mlx5_flow_destination *default_dst,
				 u32 family, u32 level, u32 prio)
{
	bool is_mpv_slave = mlx5_core_is_mp_slave(mdev);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_ipsec_rx_roce *roce;
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *g;
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	if (!ipsec_roce)
		return 0;

	roce = (family == AF_INET) ? &ipsec_roce->ipv4_rx :
				     &ipsec_roce->ipv6_rx;

	ft_attr.max_fte = 2;
	ft_attr.level = level;
	ft_attr.prio = prio;
	ft = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx ft at nic err=%d\n", err);
		return err;
	}

	roce->ft = ft;

	in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto fail_nomem;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_RX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx group at nic err=%d\n", err);
		goto fail_group;
	}
	roce->g = g;

	memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5_RX_ROCE_GROUP_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	g = mlx5_create_flow_group(ft, in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx miss group at nic err=%d\n", err);
		goto fail_mgroup;
	}
	roce->roce_miss.group = g;

	if (is_mpv_slave) {
		err = ipsec_fs_roce_rx_mpv_create(mdev, ipsec_roce, ns, family, level, prio);
		if (err) {
			mlx5_core_err(mdev, "Fail to create RoCE IPsec rx alias err=%d\n", err);
			goto fail_mpv_create;
		}
	} else {
		memset(&ft_attr, 0, sizeof(ft_attr));
		if (family == AF_INET)
			ft_attr.level = 1;
		ft_attr.max_fte = 1;
		ft = mlx5_create_flow_table(roce->ns_rdma, &ft_attr);
		if (IS_ERR(ft)) {
			err = PTR_ERR(ft);
			mlx5_core_err(mdev,
				      "Fail to create RoCE IPsec rx ft at rdma err=%d\n", err);
			goto fail_rdma_table;
		}

		roce->ft_rdma = ft;
	}

	err = ipsec_fs_roce_rx_rule_setup(mdev, default_dst, roce);
	if (err) {
		mlx5_core_err(mdev, "Fail to create RoCE IPsec rx rules err=%d\n", err);
		goto fail_setup_rule;
	}

	kvfree(in);
	return 0;

fail_setup_rule:
	if (is_mpv_slave)
		roce_rx_mpv_destroy_tables(mdev, roce);
	mlx5_destroy_flow_table(roce->ft_rdma);
fail_mpv_create:
fail_rdma_table:
	mlx5_destroy_flow_group(roce->roce_miss.group);
fail_mgroup:
	mlx5_destroy_flow_group(roce->g);
fail_group:
	kvfree(in);
fail_nomem:
	mlx5_destroy_flow_table(roce->ft);
	return err;
}

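/* RoCE IPsec on a multi-port device requires flow table alias support. */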
bool mlx5_ipsec_fs_is_mpv_roce_supported(struct mlx5_core_dev *mdev)
{
	if (!mlx5_core_mp_enabled(mdev))
		return true;

	if (ipsec_fs_create_alias_supported_one(mdev))
		return true;

	return false;
}

void mlx5_ipsec_fs_roce_cleanup(struct mlx5_ipsec_fs *ipsec_roce)
{
	kfree(ipsec_roce);
}

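/*
 * Allocate the RoCE IPsec steering context and resolve the RDMA RX/TX IPsec
 * namespaces.  Returns NULL if either namespace is unavailable.
 */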
struct mlx5_ipsec_fs *mlx5_ipsec_fs_roce_init(struct mlx5_core_dev *mdev,
					      struct mlx5_devcom_comp_dev **devcom)
{
	struct mlx5_ipsec_fs *roce_ipsec;
	struct mlx5_flow_namespace *ns;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
	if (!ns) {
		mlx5_core_err(mdev, "Failed to get RoCE rx ns\n");
		return NULL;
	}

	roce_ipsec = kzalloc(sizeof(*roce_ipsec), GFP_KERNEL);
	if (!roce_ipsec)
		return NULL;

	roce_ipsec->ipv4_rx.ns_rdma = ns;
	roce_ipsec->ipv6_rx.ns_rdma = ns;

	ns = mlx5_get_flow_namespace(mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
	if (!ns) {
		mlx5_core_err(mdev, "Failed to get RoCE tx ns\n");
		goto err_tx;
	}

	roce_ipsec->tx.ns = ns;

	roce_ipsec->devcom = devcom;

	return roce_ipsec;

err_tx:
	kfree(roce_ipsec);
	return NULL;
}