// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.

#include <linux/mlx5/device.h>
#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
#include "eswitch.h"

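/* Query the generic ipsec_offload HCA capability of the function at
 * @vport_num. *result is set to false when the firmware cannot be trusted
 * to report the cap (see mlx5_esw_ipsec_vf_offload_supported() below).
 */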
static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	int err;

	if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
		return -EOPNOTSUPP;

	if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
		*result = false;
		return 0;
	}

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
	if (err)
		goto free;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	*result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
free:
	kvfree(query_cap);
	return err;
}

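/* Per-vport IPsec offload modes: crypto-only offload and full packet
 * offload (named ipsec_full_offload in the HCA capabilities).
 */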
enum esw_vport_ipsec_offload {
	MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
	MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
};

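/* Refresh vport->info.ipsec_crypto_enabled and ipsec_packet_enabled from
 * the vport's current IPsec HCA capabilities.
 */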
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	bool ipsec_enabled;
	int err;

	/* Querying IPsec caps only makes sense when generic ipsec_offload
	 * HCA cap is enabled
	 */
	err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
	if (err)
		return err;

	if (!ipsec_enabled) {
		vport->info.ipsec_crypto_enabled = false;
		vport->info.ipsec_packet_enabled = false;
		return 0;
	}

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
	if (err)
		goto free;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	vport->info.ipsec_crypto_enabled =
		MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
	vport->info.ipsec_packet_enabled =
		MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
free:
	kvfree(query_cap);
	return err;
}

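/* Toggle the generic ipsec_offload bit in the VF's general HCA caps via
 * SET_HCA_CAP. The current general caps are queried first so that only
 * this one bit is modified.
 */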
static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap, *query_cap, *cap;
	int ret;

	if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
		return -EOPNOTSUPP;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto free;
	}

	ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
	if (ret)
		goto free;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));
	MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);

	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);

	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
free:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}

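/* Toggle one IPsec offload type (crypto or packet/full) in the vport's
 * IPsec capability section, preserving all other IPsec cap bits.
 */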
static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
				   bool enable, enum esw_vport_ipsec_offload type)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *hca_cap, *query_cap, *cap;
	int ret;

	if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
		return -EOPNOTSUPP;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto free;
	}

	ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
	if (ret)
		goto free;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));

	switch (type) {
	case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
		MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
		break;
	case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
		MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
		break;
	default:
		ret = -EOPNOTSUPP;
		goto free;
	}

	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);

	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
free:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}

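/* IPsec crypto offload additionally needs the insert_trailer bit in the
 * vport's per-protocol networking offload caps; toggle it together with
 * the crypto cap.
 */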
static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	void *hca_cap, *query_cap, *cap;
	int ret;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	hca_cap = kvzalloc(set_sz, GFP_KERNEL);
	if (!hca_cap || !query_cap) {
		ret = -ENOMEM;
		goto free;
	}

	ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
	if (ret)
		goto free;

	cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
	memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
	       MLX5_UN_SZ_BYTES(hca_cap_union));
	MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
	MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
	MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
	MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
		 MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
	ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
free:
	kvfree(hca_cap);
	kvfree(query_cap);
	return ret;
}

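/* Enable or disable one IPsec offload type for a VF vport. On enable, the
 * generic ipsec_offload cap is set before the type-specific cap; on
 * disable the order is reversed, and the generic cap is cleared only once
 * neither crypto nor packet offload is left enabled.
 */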
static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   bool enable, enum esw_vport_ipsec_offload type)
{
	struct mlx5_core_dev *dev = esw->dev;
	int err;

	if (vport->vport == MLX5_VPORT_PF)
		return -EOPNOTSUPP;

	if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
		err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
		if (err)
			return err;
	}

	if (enable) {
		err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
		if (err)
			return err;
		err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
		if (err)
			return err;
	} else {
		err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
		if (err)
			return err;
		err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
		if (err)
			return err;

		/* The generic ipsec_offload cap can be disabled only if both
		 * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
		 */
		if (!vport->info.ipsec_crypto_enabled &&
		    !vport->info.ipsec_packet_enabled) {
			err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
			if (err)
				return err;
		}
	}

	switch (type) {
	case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
		vport->info.ipsec_crypto_enabled = enable;
		break;
	case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
		vport->info.ipsec_packet_enabled = enable;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

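/* IPsec offload requires DEKs (data encryption keys); a function whose
 * log_max_dek is zero cannot hold any, so offload is not supported there.
 */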
static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	int ret;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
	if (ret)
		goto free;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
		ret = -EOPNOTSUPP;
free:
	kvfree(query_cap);
	return ret;
}

bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
{
	/* Old firmware doesn't support the ipsec_offload capability for VFs.
	 * This can be detected by checking the reformat_add_esp_trasport
	 * capability - when that cap isn't supported, the firmware cannot be
	 * trusted about what it reports for the ipsec_offload cap.
	 */
	return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
}

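/* Check whether IPsec crypto offload can be enabled for the VF at
 * @vport_num: trustworthy firmware, available DEKs and the SWP (software
 * parser) ethernet-offload capability are all required.
 */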
int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	int err;

	if (!mlx5_esw_ipsec_vf_offload_supported(dev))
		return -EOPNOTSUPP;

	err = esw_ipsec_offload_supported(dev, vport_num);
	if (err)
		return err;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
	if (err)
		goto free;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
		err = -EOPNOTSUPP;
free:
	kvfree(query_cap);
	return err;
}

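/* Check whether IPsec packet (full) offload can be enabled for the VF at
 * @vport_num: trustworthy firmware, available DEKs and decap support in
 * the NIC receive flow table are all required.
 */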
int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
					       u16 vport_num)
{
	int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *hca_cap, *query_cap;
	int ret;

	if (!mlx5_esw_ipsec_vf_offload_supported(dev))
		return -EOPNOTSUPP;

	ret = esw_ipsec_offload_supported(dev, vport_num);
	if (ret)
		return ret;

	query_cap = kvzalloc(query_sz, GFP_KERNEL);
	if (!query_cap)
		return -ENOMEM;

	ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
	if (ret)
		goto out;

	hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
	if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap))
		ret = -EOPNOTSUPP;
out:
	kvfree(query_cap);
	return ret;
}

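/* Entry point used by the devlink port function ipsec_crypto attribute
 * (the caller lives elsewhere in the driver) to toggle crypto offload for
 * @vport.
 */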
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable)
{
	return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
					       MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
}

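/* Entry point used by the devlink port function ipsec_packet attribute to
 * toggle packet offload for @vport.
 */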
int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					 bool enable)
{
	return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
					       MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
}