xref: /freebsd/sys/dev/mlx5/mlx5_core/mlx5_vport.c (revision 95ee2897e98f5d444f26ed2334cc7c439f9c16c6)
1 /*-
2  * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include "opt_rss.h"
27 #include "opt_ratelimit.h"
28 
29 #include <linux/etherdevice.h>
30 #include <dev/mlx5/driver.h>
31 #include <dev/mlx5/vport.h>
32 #include <dev/mlx5/mlx5_core/mlx5_core.h>
33 
34 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
35 					 int inlen);
36 
/*
 * Issue QUERY_VPORT_STATE for the given vport and copy the raw command
 * output into "out".  Returns 0 on success or an mlx5_cmd_exec() error.
 */
static int _mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {0};
	int ret;

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	/* vport 0 is the local vport; anything else is an "other" vport. */
	if (vport != 0)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
	if (ret != 0)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");

	return (ret);
}
56 
/*
 * Return the operational state of "vport".  On command failure the
 * output buffer stays zeroed, so the state reads back as 0.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return (MLX5_GET(query_vport_state_out, out, state));
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_state);
66 
/*
 * Return the administrative state of "vport".  On command failure the
 * output buffer stays zeroed, so the state reads back as 0.
 */
u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {0};

	_mlx5_query_vport_state(mdev, opmod, vport, out, sizeof(out));

	return (MLX5_GET(query_vport_state_out, out, admin_state));
}
EXPORT_SYMBOL(mlx5_query_vport_admin_state);
76 
/*
 * Set the administrative state of "vport" to "state" via
 * MODIFY_VPORT_STATE.  Returns 0 on success or an mlx5_cmd_exec() error.
 */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_vport_state_out)] = {0};
	int ret;

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);
	/* vport 0 is the local vport; anything else is an "other" vport. */
	if (vport != 0)
		MLX5_SET(modify_vport_state_in, in, other_vport, 1);

	ret = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (ret != 0)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_VPORT_STATE failed\n");

	return (ret);
}
EXPORT_SYMBOL(mlx5_modify_vport_admin_state);
101 
/*
 * Read the NIC vport context of "vport" into the caller-supplied buffer.
 * All query helpers in this file funnel through here.
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	/* Non-zero vport numbers address another function's vport. */
	if (vport != 0)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	return (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen));
}
116 
mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev * mdev,int client_id)117 static u32 mlx5_vport_max_q_counter_allocator(struct mlx5_core_dev *mdev,
118 					      int client_id)
119 {
120 	switch (client_id) {
121 	case MLX5_INTERFACE_PROTOCOL_IB:
122 		return (MLX5_CAP_GEN(mdev, max_qp_cnt) -
123 			MLX5_QCOUNTER_SETS_NETDEV);
124 	case MLX5_INTERFACE_PROTOCOL_ETH:
125 		return MLX5_QCOUNTER_SETS_NETDEV;
126 	default:
127 		mlx5_core_warn(mdev, "Unknown Client: %d\n", client_id);
128 		return 0;
129 	}
130 }
131 
/*
 * Allocate a firmware queue counter set on behalf of "client_id" and
 * return its id in *counter_set_id.  The per-client allocation count is
 * bumped only when the firmware command actually succeeds; the original
 * code incremented it unconditionally, so a failing command would leak
 * quota and eventually block all further allocations for that client.
 *
 * Returns 0 on success, -EINVAL when the client is over its budget, or
 * the mlx5_cmd_exec() error.
 */
int mlx5_vport_alloc_q_counter(struct mlx5_core_dev *mdev,
			       int client_id, u16 *counter_set_id)
{
	u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)] = {0};
	int err;

	/* NOTE(review): ">" permits one allocation beyond the reported
	 * maximum; ">=" may have been intended — confirm against callers
	 * before tightening. */
	if (mdev->num_q_counter_allocated[client_id] >
	    mlx5_vport_max_q_counter_allocator(mdev, client_id))
		return -EINVAL;

	MLX5_SET(alloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_ALLOC_Q_COUNTER);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*counter_set_id = MLX5_GET(alloc_q_counter_out, out,
				   counter_set_id);

	/* Account for the new counter set only on success. */
	mdev->num_q_counter_allocated[client_id]++;

	return 0;
}
156 
/*
 * Release the queue counter set "counter_set_id" previously allocated
 * for "client_id".  The per-client allocation count is decremented only
 * when the firmware command succeeds; the original code decremented it
 * unconditionally, which let the bookkeeping drift below the number of
 * counter sets the hardware still holds when a command failed.
 *
 * Returns 0 on success, -EINVAL when the client has nothing allocated,
 * or the mlx5_cmd_exec() error.
 */
int mlx5_vport_dealloc_q_counter(struct mlx5_core_dev *mdev,
				 int client_id, u16 counter_set_id)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)] = {0};
	int err;

	if (mdev->num_q_counter_allocated[client_id] <= 0)
		return -EINVAL;

	MLX5_SET(dealloc_q_counter_in, in, opcode,
		 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
	MLX5_SET(dealloc_q_counter_in, in, counter_set_id,
		 counter_set_id);

	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	/* The counter set is gone from hardware; drop it from the count. */
	mdev->num_q_counter_allocated[client_id]--;

	return 0;
}
178 
/*
 * Read the queue counter set "counter_set_id" into the caller-supplied
 * buffer.  A non-zero "reset" sets the command's "clear" bit, asking
 * firmware to zero the counters as part of the query.
 */
int mlx5_vport_query_q_counter(struct mlx5_core_dev *mdev,
				      u16 counter_set_id,
				      int reset,
				      void *out,
				      int out_size)
{
	u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {0};

	MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
	MLX5_SET(query_q_counter_in, in, counter_set_id, counter_set_id);
	MLX5_SET(query_q_counter_in, in, clear, reset);

	return (mlx5_cmd_exec(mdev, in, sizeof(in), out, out_size));
}
193 
/*
 * Fetch the "out_of_buffer" drop counter from queue counter set
 * "counter_set_id" without clearing it.  *out_of_rx_buffer is written
 * only on success.
 */
int mlx5_vport_query_out_of_rx_buffer(struct mlx5_core_dev *mdev,
				      u16 counter_set_id,
				      u32 *out_of_rx_buffer)
{
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {0};
	int ret;

	ret = mlx5_vport_query_q_counter(mdev, counter_set_id, 0, out,
	    sizeof(out));
	if (ret == 0)
		*out_of_rx_buffer = MLX5_GET(query_q_counter_out, out,
		    out_of_buffer);

	return (ret);
}
211 
/*
 * Read the minimum WQE inline mode from the NIC vport context of
 * "vport".  *min_inline is written only on success.
 */
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
	int ret;

	ret = mlx5_query_nic_vport_context(mdev, vport, out, sizeof(out));
	if (ret == 0)
		*min_inline = MLX5_GET(query_nic_vport_context_out, out,
		    nic_vport_context.min_wqe_inline_mode);

	return (ret);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
225 
/*
 * Resolve the effective minimum inline mode for the local vport from
 * the device's wqe_inline_mode capability.  When the capability says
 * the mode lives in the vport context, query it from there.
 */
int mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			  u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		return (0);
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		/* Mode is per-vport; read it from vport 0's context. */
		return (mlx5_query_nic_vport_min_inline(mdev, 0,
		    min_inline_mode));
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		return (0);
	default:
		return (-EINVAL);
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
250 
/*
 * Set the minimum WQE inline mode in the NIC vport context of "vport".
 */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	/* NOTE(review): other_vport is set unconditionally here, unlike the
	 * other modify helpers in this file which set it only for
	 * vport != 0 — presumably this path is only used by the eswitch
	 * manager on other vports; confirm against callers. */
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 min_wqe_inline_mode, min_inline);

	return mlx5_modify_nic_vport_context(mdev, in, inlen);
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_min_inline);
271 
/*
 * Copy the permanent MAC address of "vport" into "addr".  The MAC
 * occupies the permanent_address field starting at byte offset 2.
 */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, u8 *addr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	u8 *pa;
	int ret;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, vport, out, outlen);
	if (ret == 0) {
		pa = MLX5_ADDR_OF(query_nic_vport_context_out, out,
		    nic_vport_context.permanent_address);
		ether_addr_copy(addr, pa + 2);
	}

	kvfree(out);
	return (ret);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
298 
/*
 * Program a new permanent MAC address into the NIC vport context of
 * "vport".  The MAC is written into the permanent_address field at byte
 * offset 2, mirroring the layout read by the query helper above.
 */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, u8 *addr)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *ctx;
	void *in;
	u8 *pmac;
	int ret;

	in = mlx5_vzalloc(inlen);
	if (in == NULL) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport != 0)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	pmac = MLX5_ADDR_OF(nic_vport_context, ctx, permanent_address);
	ether_addr_copy(pmac + 2, addr);

	ret = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return (ret);
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_mac_address);
335 
/*
 * Read the system image GUID from the local (vport 0) NIC vport
 * context.  *system_image_guid is written only on success.
 */
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int ret;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (ret == 0)
		*system_image_guid = MLX5_GET64(query_nic_vport_context_out,
		    out, nic_vport_context.system_image_guid);

	kvfree(out);
	return (ret);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
358 
/*
 * Read the node GUID from the local (vport 0) NIC vport context.
 * *node_guid is written only on success.
 */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int ret;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (ret == 0)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
		    nic_vport_context.node_guid);

	kvfree(out);
	return (ret);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
381 
/*
 * Read the port GUID from the local (vport 0) NIC vport context.
 * *port_guid is written only on success.
 */
static int mlx5_query_nic_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int ret;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (ret == 0)
		*port_guid = MLX5_GET64(query_nic_vport_context_out, out,
		    nic_vport_context.port_guid);

	kvfree(out);
	return (ret);
}
404 
/*
 * Read the Q_Key violation counter from the local (vport 0) NIC vport
 * context.  *qkey_viol_cntr is written only on success.
 */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int ret;

	out = mlx5_vzalloc(outlen);
	if (out == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
	if (ret == 0)
		*qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out,
		    nic_vport_context.qkey_violation_counter);

	kvfree(out);
	return (ret);
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
428 
mlx5_modify_nic_vport_context(struct mlx5_core_dev * mdev,void * in,int inlen)429 static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
430 					 int inlen)
431 {
432 	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
433 
434 	MLX5_SET(modify_nic_vport_context_in, in, opcode,
435 		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
436 
437 	return mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
438 }
439 
mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev * mdev,int enable_disable)440 static int mlx5_nic_vport_enable_disable_roce(struct mlx5_core_dev *mdev,
441 					      int enable_disable)
442 {
443 	void *in;
444 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
445 	int err;
446 
447 	in = mlx5_vzalloc(inlen);
448 	if (!in) {
449 		mlx5_core_warn(mdev, "failed to allocate inbox\n");
450 		return -ENOMEM;
451 	}
452 
453 	MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
454 	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
455 		 enable_disable);
456 
457 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
458 
459 	kvfree(in);
460 
461 	return err;
462 }
463 
/*
 * Install "addr" as the single entry of the vport's current unicast MAC
 * allowed list.  "other_vport" selects whether "vport" refers to
 * another function's vport.
 */
int mlx5_set_nic_vport_current_mac(struct mlx5_core_dev *mdev, int vport,
				   bool other_vport, u8 *addr)
{
	void *in;
	/* Room for the base command plus one mac_address_layout entry. */
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout);
	u8  *mac_layout;
	u8  *mac_ptr;
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in,
		 other_vport, other_vport);
	/* Modify only the allowed-addresses list: type UC, one entry. */
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_UC);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.allowed_list_size, 1);

	/* Write the MAC into the first list slot, starting at the
	 * mac_addr_47_32 sub-field of the mac_address_layout. */
	mac_layout = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, in,
		nic_vport_context.current_uc_mac_address);
	mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_layout,
		mac_addr_47_32);
	ether_addr_copy(mac_ptr, addr);

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_current_mac);
507 
/*
 * Set the node GUID of another function's vport.  Only the vport group
 * manager may do this, only for vport != 0, and only when firmware
 * advertises nic_vport_node_guid_modify.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 node_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *ctx;
	void *in;
	int ret;

	if (vport == 0)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (in == NULL) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	MLX5_SET64(nic_vport_context, ctx, node_guid, node_guid);

	ret = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return (ret);
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_node_guid);
546 
/*
 * Set the port GUID of another function's vport.  Only the vport group
 * manager may do this, only for vport != 0, and only when firmware
 * advertises nic_vport_port_guid_modify.
 */
int mlx5_modify_nic_vport_port_guid(struct mlx5_core_dev *mdev,
				    u32 vport, u64 port_guid)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *ctx;
	void *in;
	int ret;

	if (vport == 0)
		return -EINVAL;
	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EPERM;
	if (!MLX5_CAP_ESW(mdev, nic_vport_port_guid_modify))
		return -ENOTSUPP;

	in = mlx5_vzalloc(inlen);
	if (in == NULL) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.port_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	MLX5_SET64(nic_vport_context, ctx, port_guid, port_guid);

	ret = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);
	return (ret);
}
EXPORT_SYMBOL(mlx5_modify_nic_vport_port_guid);
585 
/*
 * Replace the vport's allowed-VLAN list with "vlan_list" (list_len
 * entries).  Returns -ENOSPC when the request exceeds the device's
 * log_max_vlan_list capability.
 */
int mlx5_set_nic_vport_vlan_list(struct mlx5_core_dev *dev, u16 vport,
				 u16 *vlan_list, int list_len)
{
	void *in, *ctx;
	int i, err;
	/* Base command plus one vlan_layout entry per VLAN. */
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		+ MLX5_ST_SZ_BYTES(vlan_layout) * (int)list_len;

	int max_list_size = 1 << MLX5_CAP_GEN_MAX(dev, log_max_vlan_list);

	if (list_len > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       list_len, max_list_size);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(dev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_len);

	/* current_uc_mac_address[] is the generic allowed-list storage;
	 * with allowed_list_type set to VLAN, each slot is interpreted as
	 * a vlan_layout (same convention as the query/modify vlans
	 * helpers below). */
	for (i = 0; i < list_len; i++) {
		u8 *vlan_lout = MLX5_ADDR_OF(nic_vport_context, ctx,
					 current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_lout, vlan, vlan_list[i]);
	}

	err = mlx5_modify_nic_vport_context(dev, in, inlen);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_vlan_list);
633 
/*
 * Replace the vport's allowed multicast MAC list with "addr_list"
 * (addr_list_len entries, each a MAC packed into the low bytes of a
 * u64).  Returns -ENOSPC when the request exceeds the device's
 * log_max_current_mc_list capability.
 */
int mlx5_set_nic_vport_mc_list(struct mlx5_core_dev *mdev, int vport,
			       u64 *addr_list, size_t addr_list_len)
{
	void *in, *ctx;
	/* Base command plus one mac_address_layout entry per address. */
	int  inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)
		  + MLX5_ST_SZ_BYTES(mac_address_layout) * (int)addr_list_len;
	int err;
	size_t i;
	int max_list_sz = 1 << MLX5_CAP_GEN_MAX(mdev, log_max_current_mc_list);

	if ((int)addr_list_len > max_list_sz) {
		mlx5_core_warn(mdev, "Requested list size (%d) > (%d) max_list_size\n",
			       (int)addr_list_len, max_list_sz);
		return -ENOSPC;
	}

	in = mlx5_vzalloc(inlen);
	if (!in) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in,
			 other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);

	/* current_uc_mac_address[] is the generic allowed-list storage;
	 * allowed_list_type = MC makes firmware treat the entries as
	 * multicast MACs. */
	MLX5_SET(nic_vport_context, ctx, allowed_list_type,
		 MLX5_NIC_VPORT_LIST_TYPE_MC);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, addr_list_len);

	for (i = 0; i < addr_list_len; i++) {
		u8 *mac_lout = (u8 *)MLX5_ADDR_OF(nic_vport_context, ctx,
						  current_uc_mac_address[i]);
		u8 *mac_ptr = (u8 *)MLX5_ADDR_OF(mac_address_layout, mac_lout,
						 mac_addr_47_32);
		/* assumes each u64 holds a MAC in its first 6 bytes —
		 * TODO confirm against callers. */
		ether_addr_copy(mac_ptr, (u8 *)&addr_list[i]);
	}

	err = mlx5_modify_nic_vport_context(mdev, in, inlen);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_mc_list);
684 
/*
 * Program the vport's promiscuous-mode bits (multicast, unicast, all)
 * in one MODIFY_NIC_VPORT_CONTEXT command.
 */
int mlx5_set_nic_vport_promisc(struct mlx5_core_dev *mdev, int vport,
			       bool promisc_mc, bool promisc_uc,
			       bool promisc_all)
{
	u8 in[MLX5_ST_SZ_BYTES(modify_nic_vport_context_in)] = {0};
	u8 *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
			       nic_vport_context);

	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport != 0)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);

	/* Unset bits stay zero from the initializer. */
	MLX5_SET(nic_vport_context, ctx, promisc_mc, promisc_mc ? 1 : 0);
	MLX5_SET(nic_vport_context, ctx, promisc_uc, promisc_uc ? 1 : 0);
	MLX5_SET(nic_vport_context, ctx, promisc_all, promisc_all ? 1 : 0);

	return (mlx5_modify_nic_vport_context(mdev, in, sizeof(in)));
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_promisc);
710 
/*
 * Read the vport's allowed MAC list of the given type (UC or MC) into
 * addr_list.  On entry *list_size is the caller's capacity; on success
 * it is updated to the number of entries firmware returned.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	max_list_size = (list_type == MLX5_NIC_VPORT_LIST_TYPE_UC) ?
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN_MAX(dev, log_max_current_mc_list);

	/* Clamp (not fail) an oversized request to the device maximum. */
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): sized with the modify_..._in struct rather than
	 * query_nic_vport_context_out — presumably the fixed parts are
	 * the same size; confirm against the ifc definitions. */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		 req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* Firmware reports how many entries it actually filled in. */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* The 6-byte MAC starts 2 bytes into each list slot. */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					nic_vport_ctx,
					current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
774 
/*
 * Replace the local vport's allowed MAC list of the given type with the
 * list_size entries in addr_list.  Returns -ENOSPC when list_size
 * exceeds the device capability.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	/* NOTE(review): uses MLX5_CAP_GEN here while the query helper
	 * above uses MLX5_CAP_GEN_MAX for the same limits — confirm
	 * which capability field is intended. */
	max_list_size = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC ?
		 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
		 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Base command plus one mac_address_layout entry per address. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* The 6-byte MAC starts 2 bytes into each list slot. */
		u8 *curr_mac = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(curr_mac, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
827 
/*
 * Read the vport's allowed-VLAN list into "vlans".  On entry *size is
 * the caller's capacity; on success it is updated to the number of
 * entries firmware returned.
 */
int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev,
			       u16 vport,
			       u16 vlans[],
			       int *size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int req_list_size;
	int max_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *size;
	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);
	/* Clamp (not fail) an oversized request to the device maximum. */
	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max list size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* NOTE(review): sized with the modify_..._in struct rather than
	 * query_nic_vport_context_out — presumably the fixed parts are
	 * the same size; confirm against the ifc definitions. */
	out_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		 req_list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type,
		 MLX5_NIC_VPORT_CONTEXT_ALLOWED_LIST_TYPE_VLAN_LIST);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);

	if (vport)
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	/* Firmware reports how many entries it actually filled in. */
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* current_uc_mac_address[] is the generic allowed-list
		 * storage; with the VLAN list type each slot is read as
		 * a vlan_layout. */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					 current_uc_mac_address[i]);
		vlans[i] = MLX5_GET(vlan_layout, vlan_addr, vlan);
	}
out:
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_vlans);
887 
/*
 * Replace the local vport's allowed-VLAN list with the list_size
 * entries in "vlans".  Returns -ENOSPC when list_size exceeds the
 * device's log_max_vlan_list capability.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Base command plus one vlan_layout entry per VLAN. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NIC_VPORT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* current_uc_mac_address[] is the generic allowed-list
		 * storage; each slot is written as a vlan_layout. */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
937 
/*
 * Report whether RoCE is enabled on the local NIC vport.
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_nic_vport_roce_en(struct mlx5_core_dev *mdev, u8 *enable)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *reply;
	int ret;

	reply = kzalloc(outlen, GFP_KERNEL);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, reply, outlen);
	if (ret == 0)
		*enable = MLX5_GET(query_nic_vport_context_out, reply,
		    nic_vport_context.roce_en);

	kfree(reply);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_roce_en);
960 
/*
 * Set the permanent (burned-in) MAC address of vport @vport.
 * Issued as an other_vport command. Returns 0 or a negative errno.
 */
int mlx5_set_nic_vport_permanent_mac(struct mlx5_core_dev *mdev, int vport,
				     u8 *addr)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u8 *perm_mac;
	void *cmd;
	int ret;

	cmd = mlx5_vzalloc(inlen);
	if (cmd == NULL) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, cmd,
	    opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, cmd, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, cmd, other_vport, 1);
	MLX5_SET(modify_nic_vport_context_in, cmd,
	    field_select.permanent_address, 1);

	/* mac_addr_47_32 is the first byte of the 6-byte address field. */
	perm_mac = (u8 *)MLX5_ADDR_OF(modify_nic_vport_context_in, cmd,
	    nic_vport_context.permanent_address.mac_addr_47_32);
	ether_addr_copy(perm_mac, addr);

	ret = mlx5_modify_nic_vport_context(mdev, cmd, inlen);

	kvfree(cmd);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_set_nic_vport_permanent_mac);
992 
/* Enable RoCE on the local NIC vport; thin wrapper around
 * mlx5_nic_vport_enable_disable_roce(). */
int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 1);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
998 
/* Disable RoCE on the local NIC vport; thin wrapper around
 * mlx5_nic_vport_enable_disable_roce(). */
int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
{
	return mlx5_nic_vport_enable_disable_roce(mdev, 0);
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_disable_roce);
1004 
/*
 * Execute a QUERY_VPORT_COUNTER command.
 *
 * @other_vport: when non-zero, query VF @vf (firmware vport @vf + 1)
 *	instead of the caller's own vport; requires the
 *	vport_group_manager capability, otherwise -EPERM is returned.
 * @port_num: only programmed on dual-port devices.
 * @out/@out_sz: caller-provided reply buffer.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out,
				  size_t out_sz)
{
	int	in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int	is_group_manager;
	void   *in;
	int	err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = mlx5_vzalloc(in_sz);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_vport_counter_in, in, other_vport, 1);
			/* VF n maps to firmware vport n + 1 (0 is the PF). */
			MLX5_SET(query_vport_counter_in, in, vport_number,
				 vf + 1);
		} else {
			err = -EPERM;
			goto free;
		}
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1041 
/*
 * Run QUERY_HCA_VPORT_CONTEXT for the given port/vport and place the
 * raw reply in @out. Querying any vport other than our own requires
 * the vport_group_manager capability; -EPERM otherwise.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *mdev,
				 u8 port_num, u8 vport_num, u32 *out,
				 int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {0};

	MLX5_SET(query_hca_vport_context_in, in, opcode,
	    MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (vport_num != 0) {
		if (!MLX5_CAP_GEN(mdev, vport_group_manager))
			return -EPERM;
		MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
		MLX5_SET(query_hca_vport_context_in, in, vport_number,
		    vport_num);
	}

	/* The port number is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(mdev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
1070 
/* Read hca_vport_context.system_image_guid of our own vport (port 1). */
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 *reply;
	int ret;

	reply = mlx5_vzalloc(outlen);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_hca_vport_context(mdev, 1, 0, reply, outlen);
	if (ret == 0)
		*system_image_guid = MLX5_GET64(query_hca_vport_context_out,
		    reply, hca_vport_context.system_image_guid);

	kvfree(reply);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
1094 
/* Read hca_vport_context.node_guid of our own vport (port 1). */
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 *reply;
	int ret;

	reply = mlx5_vzalloc(outlen);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_hca_vport_context(mdev, 1, 0, reply, outlen);
	if (ret == 0)
		*node_guid = MLX5_GET64(query_hca_vport_context_out, reply,
		    hca_vport_context.node_guid);

	kvfree(reply);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
1117 
/* Read hca_vport_context.port_guid of our own vport (port 1). */
static int mlx5_query_hca_vport_port_guid(struct mlx5_core_dev *mdev,
					  u64 *port_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 *reply;
	int ret;

	reply = mlx5_vzalloc(outlen);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_hca_vport_context(mdev, 1, 0, reply, outlen);
	if (ret == 0)
		*port_guid = MLX5_GET64(query_hca_vport_context_out, reply,
		    hca_vport_context.port_guid);

	kvfree(reply);
	return ret;
}
/*
 * Query the GID table of an HCA vport.
 *
 * @gid_index: index of a single GID to read, or 0xffff to request the
 *	whole table from firmware.
 * @vport_num: non-zero values address another vport and require the
 *	vport_group_manager capability (-EPERM otherwise).
 *
 * NOTE(review): even when the whole table is requested (0xffff), only
 * the first returned entry is copied into @gid — the reply buffer is
 * sized for @nout entries but tmp[0] is the only one read out.
 * NOTE(review): the bounds check uses "gid_index > tbsz"; confirm
 * whether index == tbsz should also be rejected.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 port_num,
			     u16 vport_num, u16 gid_index, union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "read the entire table". */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	/* Grow the reply buffer by one GID per requested entry. */
	out_sz += nout * sizeof(*gid);

	in = mlx5_vzalloc(in_sz);
	out = mlx5_vzalloc(out_sz);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (vport_num) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number,
				 vport_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}

	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* The port number is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy only the first entry of the reply into the caller's GID. */
	tmp = (union ib_gid *)MLX5_ADDR_OF(query_hca_vport_gid_out, out, gid);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
1206 
/*
 * Query P_Key table entries of an HCA vport.
 *
 * @pkey_index: index of a single P_Key to read, or 0xffff to read the
 *	whole table.
 * @other_vport: querying a foreign vport (@vf_num) requires the
 *	vport_group_manager capability (-EPERM otherwise).
 * @pkey: output array; receives one entry, or the whole table when
 *	pkey_index == 0xffff.
 *
 * Fix: the two kzalloc() results were previously used without a NULL
 * check, dereferencing NULL under memory pressure; both allocations
 * are now verified before use.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "read the entire table". */
	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kzalloc(in_sz, GFP_KERNEL);
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode,
		 MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number,
				 vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* The port number is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy each returned pkey entry into the caller's array. */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++,
	     pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kfree(in);
	kfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
1270 
mlx5_query_hca_min_wqe_header(struct mlx5_core_dev * mdev,int * min_header)1271 static int mlx5_query_hca_min_wqe_header(struct mlx5_core_dev *mdev,
1272 					 int *min_header)
1273 {
1274 	u32 *out;
1275 	u32 outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
1276 	int err;
1277 
1278 	out = mlx5_vzalloc(outlen);
1279 	if (!out)
1280 		return -ENOMEM;
1281 
1282 	err = mlx5_query_hca_vport_context(mdev, 1, 0, out, outlen);
1283 	if (err)
1284 		goto out;
1285 
1286 	*min_header = MLX5_GET(query_hca_vport_context_out, out,
1287 			       hca_vport_context.min_wqe_inline_mode);
1288 
1289 out:
1290 	kvfree(out);
1291 	return err;
1292 }
1293 
/*
 * Issue MODIFY_ESW_VPORT_CONTEXT for @vport with a caller-prepared
 * mailbox; fills in the opcode and vport addressing fields.
 */
static int mlx5_modify_eswitch_vport_context(struct mlx5_core_dev *mdev,
					     u16 vport, void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
	int ret;

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
	    MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport != 0)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	ret = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
	if (ret != 0)
		mlx5_core_warn(mdev, "MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT failed\n");

	return ret;
}
1313 
/*
 * Configure e-switch CVLAN insert/strip behavior for @vport.
 * The VLAN header fields (cfi/pcp/id) are only programmed when
 * insertion is enabled.
 */
int mlx5_set_eswitch_cvlan_info(struct mlx5_core_dev *mdev, u8 vport,
				u8 insert_mode, u8 strip_mode,
				u16 vlan, u8 cfi, u8 pcp)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (insert_mode != MLX5_MODIFY_ESW_VPORT_CONTEXT_CVLAN_INSERT_NONE) {
		MLX5_SET(modify_esw_vport_context_in, in,
		    esw_vport_context.cvlan_cfi, cfi);
		MLX5_SET(modify_esw_vport_context_in, in,
		    esw_vport_context.cvlan_pcp, pcp);
		MLX5_SET(modify_esw_vport_context_in, in,
		    esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
	    esw_vport_context.vport_cvlan_insert, insert_mode);
	MLX5_SET(modify_esw_vport_context_in, in,
	    esw_vport_context.vport_cvlan_strip, strip_mode);

	/* Select both the strip and insert fields for modification. */
	MLX5_SET(modify_esw_vport_context_in, in, field_select,
	    MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_STRIP |
	    MLX5_MODIFY_ESW_VPORT_CONTEXT_FIELD_SELECT_CVLAN_INSERT);

	return mlx5_modify_eswitch_vport_context(mdev, vport, in, sizeof(in));
}
EXPORT_SYMBOL_GPL(mlx5_set_eswitch_cvlan_info);
1344 
mlx5_query_vport_mtu(struct mlx5_core_dev * mdev,int * mtu)1345 int mlx5_query_vport_mtu(struct mlx5_core_dev *mdev, int *mtu)
1346 {
1347 	u32 *out;
1348 	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1349 	int err;
1350 
1351 	out = mlx5_vzalloc(outlen);
1352 	if (!out)
1353 		return -ENOMEM;
1354 
1355 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1356 	if (err)
1357 		goto out;
1358 
1359 	*mtu = MLX5_GET(query_nic_vport_context_out, out,
1360 			nic_vport_context.mtu);
1361 
1362 out:
1363 	kvfree(out);
1364 	return err;
1365 }
1366 EXPORT_SYMBOL_GPL(mlx5_query_vport_mtu);
1367 
mlx5_set_vport_mtu(struct mlx5_core_dev * mdev,int mtu)1368 int mlx5_set_vport_mtu(struct mlx5_core_dev *mdev, int mtu)
1369 {
1370 	u32 *in;
1371 	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1372 	int err;
1373 
1374 	in = mlx5_vzalloc(inlen);
1375 	if (!in)
1376 		return -ENOMEM;
1377 
1378 	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
1379 	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
1380 
1381 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1382 
1383 	kvfree(in);
1384 	return err;
1385 }
1386 EXPORT_SYMBOL_GPL(mlx5_set_vport_mtu);
1387 
mlx5_query_vport_min_wqe_header(struct mlx5_core_dev * mdev,int * min_header)1388 static int mlx5_query_vport_min_wqe_header(struct mlx5_core_dev *mdev,
1389 					   int *min_header)
1390 {
1391 	u32 *out;
1392 	u32 outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
1393 	int err;
1394 
1395 	out = mlx5_vzalloc(outlen);
1396 	if (!out)
1397 		return -ENOMEM;
1398 
1399 	err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
1400 	if (err)
1401 		goto out;
1402 
1403 	*min_header = MLX5_GET(query_nic_vport_context_out, out,
1404 			       nic_vport_context.min_wqe_inline_mode);
1405 
1406 out:
1407 	kvfree(out);
1408 	return err;
1409 }
1410 
/*
 * Set min_wqe_inline_mode on @vport; always issued as an other_vport
 * command.
 */
int mlx5_set_vport_min_wqe_header(struct mlx5_core_dev *mdev,
				  u8 vport, int min_header)
{
	u32 inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	u32 *cmd;
	int ret;

	cmd = mlx5_vzalloc(inlen);
	if (cmd == NULL)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, cmd,
	    field_select.min_wqe_inline_mode, 1);
	MLX5_SET(modify_nic_vport_context_in, cmd,
	    nic_vport_context.min_wqe_inline_mode, min_header);
	MLX5_SET(modify_nic_vport_context_in, cmd, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, cmd, other_vport, 1);

	ret = mlx5_modify_nic_vport_context(mdev, cmd, inlen);

	kvfree(cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_set_vport_min_wqe_header);
1435 
mlx5_query_min_wqe_header(struct mlx5_core_dev * dev,int * min_header)1436 int mlx5_query_min_wqe_header(struct mlx5_core_dev *dev, int *min_header)
1437 {
1438 	switch (MLX5_CAP_GEN(dev, port_type)) {
1439 	case MLX5_CMD_HCA_CAP_PORT_TYPE_IB:
1440 		return mlx5_query_hca_min_wqe_header(dev, min_header);
1441 
1442 	case MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET:
1443 		return mlx5_query_vport_min_wqe_header(dev, min_header);
1444 
1445 	default:
1446 		return -EINVAL;
1447 	}
1448 }
1449 EXPORT_SYMBOL_GPL(mlx5_query_min_wqe_header);
1450 
/* Read the promiscuous flags (unicast, multicast, all) of @vport. */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *reply;
	int ret;

	reply = kzalloc(outlen, GFP_KERNEL);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, vport, reply, outlen);
	if (ret == 0) {
		*promisc_uc = MLX5_GET(query_nic_vport_context_out, reply,
		    nic_vport_context.promisc_uc);
		*promisc_mc = MLX5_GET(query_nic_vport_context_out, reply,
		    nic_vport_context.promisc_mc);
		*promisc_all = MLX5_GET(query_nic_vport_context_out, reply,
		    nic_vport_context.promisc_all);
	}

	kfree(reply);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
1481 
mlx5_modify_nic_vport_promisc(struct mlx5_core_dev * mdev,int promisc_uc,int promisc_mc,int promisc_all)1482 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
1483 				  int promisc_uc,
1484 				  int promisc_mc,
1485 				  int promisc_all)
1486 {
1487 	void *in;
1488 	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1489 	int err;
1490 
1491 	in = mlx5_vzalloc(inlen);
1492 	if (!in) {
1493 		mlx5_core_err(mdev, "failed to allocate inbox\n");
1494 		return -ENOMEM;
1495 	}
1496 
1497 	MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
1498 	MLX5_SET(modify_nic_vport_context_in, in,
1499 		 nic_vport_context.promisc_uc, promisc_uc);
1500 	MLX5_SET(modify_nic_vport_context_in, in,
1501 		 nic_vport_context.promisc_mc, promisc_mc);
1502 	MLX5_SET(modify_nic_vport_context_in, in,
1503 		 nic_vport_context.promisc_all, promisc_all);
1504 
1505 	err = mlx5_modify_nic_vport_context(mdev, in, inlen);
1506 	kvfree(in);
1507 	return err;
1508 }
1509 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
1510 
/*
 * Enable or disable local loopback on the local NIC vport. Only the
 * fields whose disable capability is present are selected for
 * modification; when neither capability exists this is a no-op.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *cmd;
	int ret;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	cmd = kvzalloc(inlen, GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	/* The context carries "disable" bits, hence the inversion. */
	MLX5_SET(modify_nic_vport_context_in, cmd,
	    nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, cmd,
	    nic_vport_context.disable_uc_local_lb, !enable);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, cmd,
		    field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, cmd,
		    field_select.disable_uc_local_lb, 1);

	ret = mlx5_modify_nic_vport_context(mdev, cmd, inlen);

	if (ret == 0)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
1548 
/*
 * Set one local-loopback disable bit (MC or UC, per @selection) to
 * @value on vport 0.
 */
int mlx5_nic_vport_modify_local_lb(struct mlx5_core_dev *mdev,
				   enum mlx5_local_lb_selection selection,
				   u8 value)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *cmd;
	int ret;

	cmd = mlx5_vzalloc(inlen);
	if (cmd == NULL) {
		mlx5_core_warn(mdev, "failed to allocate inbox\n");
		return -ENOMEM;
	}

	MLX5_SET(modify_nic_vport_context_in, cmd, vport_number, 0);

	if (selection == MLX5_LOCAL_MC_LB) {
		MLX5_SET(modify_nic_vport_context_in, cmd,
		    field_select.disable_mc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, cmd,
		    nic_vport_context.disable_mc_local_lb, value);
	} else {
		MLX5_SET(modify_nic_vport_context_in, cmd,
		    field_select.disable_uc_local_lb, 1);
		MLX5_SET(modify_nic_vport_context_in, cmd,
		    nic_vport_context.disable_uc_local_lb, value);
	}

	ret = mlx5_modify_nic_vport_context(mdev, cmd, inlen);

	kvfree(cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_modify_local_lb);
1585 
/* Read back one local-loopback disable bit of the local vport. */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev,
				  enum mlx5_local_lb_selection selection,
				  u8 *value)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	void *reply;
	int ret;

	reply = kzalloc(outlen, GFP_KERNEL);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_nic_vport_context(mdev, 0, reply, outlen);
	if (ret == 0) {
		if (selection == MLX5_LOCAL_MC_LB)
			*value = MLX5_GET(query_nic_vport_context_out, reply,
			    nic_vport_context.disable_mc_local_lb);
		else
			*value = MLX5_GET(query_nic_vport_context_out, reply,
			    nic_vport_context.disable_uc_local_lb);
	}

	kfree(reply);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
1614 
/*
 * Issue QUERY_VPORT_COUNTER for @vport_num; other-vport queries
 * require the vport_group_manager capability (-EPERM otherwise).
 */
int mlx5_query_vport_counter(struct mlx5_core_dev *dev,
			     u8 port_num, u16 vport_num,
			     void *out, int out_size)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	void *cmd;
	int ret;

	cmd = mlx5_vzalloc(in_sz);
	if (cmd == NULL)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, cmd, opcode,
	    MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (vport_num != 0) {
		if (!MLX5_CAP_GEN(dev, vport_group_manager)) {
			ret = -EPERM;
			goto done;
		}
		MLX5_SET(query_vport_counter_in, cmd, other_vport, 1);
		MLX5_SET(query_vport_counter_in, cmd, vport_number,
		    vport_num);
	}

	/* The port number is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, cmd, port_num, port_num);

	ret = mlx5_cmd_exec(dev, cmd, in_sz, out, out_size);

done:
	kvfree(cmd);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_counter);
1652 
/*
 * Fetch all vport traffic counters of @port_num into @vc.
 *
 * The original open-coded twenty-four near-identical MLX5_GET64()
 * assignments; a local helper macro keeps each counter copy on one
 * line and removes the duplication. The set of fields copied is
 * unchanged.
 *
 * Returns 0 on success or a negative errno.
 */
int mlx5_get_vport_counters(struct mlx5_core_dev *dev, u8 port_num,
			    struct mlx5_vport_counters *vc)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	void *out;
	int err;

	out = mlx5_vzalloc(out_sz);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_vport_counter(dev, port_num, 0, out, out_sz);
	if (err)
		goto ex;

	/* Copy one 64-bit counter field out of the command reply. */
#define	MLX5_VPORT_CTR(field)						\
	(vc->field = MLX5_GET64(query_vport_counter_out, out, field))

	MLX5_VPORT_CTR(received_errors.packets);
	MLX5_VPORT_CTR(received_errors.octets);
	MLX5_VPORT_CTR(transmit_errors.packets);
	MLX5_VPORT_CTR(transmit_errors.octets);
	MLX5_VPORT_CTR(received_ib_unicast.packets);
	MLX5_VPORT_CTR(received_ib_unicast.octets);
	MLX5_VPORT_CTR(transmitted_ib_unicast.packets);
	MLX5_VPORT_CTR(transmitted_ib_unicast.octets);
	MLX5_VPORT_CTR(received_ib_multicast.packets);
	MLX5_VPORT_CTR(received_ib_multicast.octets);
	MLX5_VPORT_CTR(transmitted_ib_multicast.packets);
	MLX5_VPORT_CTR(transmitted_ib_multicast.octets);
	MLX5_VPORT_CTR(received_eth_broadcast.packets);
	MLX5_VPORT_CTR(received_eth_broadcast.octets);
	MLX5_VPORT_CTR(transmitted_eth_broadcast.packets);
	MLX5_VPORT_CTR(transmitted_eth_broadcast.octets);
	MLX5_VPORT_CTR(received_eth_unicast.octets);
	MLX5_VPORT_CTR(received_eth_unicast.packets);
	MLX5_VPORT_CTR(transmitted_eth_unicast.octets);
	MLX5_VPORT_CTR(transmitted_eth_unicast.packets);
	MLX5_VPORT_CTR(received_eth_multicast.octets);
	MLX5_VPORT_CTR(received_eth_multicast.packets);
	MLX5_VPORT_CTR(transmitted_eth_multicast.octets);
	MLX5_VPORT_CTR(transmitted_eth_multicast.packets);

#undef MLX5_VPORT_CTR

ex:
	kvfree(out);
	return err;
}
/* Port-type dispatch for the system image GUID query. */
int mlx5_query_vport_system_image_guid(struct mlx5_core_dev *dev,
				       u64 *sys_image_guid)
{
	int port_type = MLX5_CAP_GEN(dev, port_type);

	if (port_type == MLX5_CMD_HCA_CAP_PORT_TYPE_IB)
		return mlx5_query_hca_vport_system_image_guid(dev,
		    sys_image_guid);
	if (port_type == MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET)
		return mlx5_query_nic_vport_system_image_guid(dev,
		    sys_image_guid);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_system_image_guid);
1763 
/* Port-type dispatch for the node GUID query. */
int mlx5_query_vport_node_guid(struct mlx5_core_dev *dev, u64 *node_guid)
{
	int port_type = MLX5_CAP_GEN(dev, port_type);

	if (port_type == MLX5_CMD_HCA_CAP_PORT_TYPE_IB)
		return mlx5_query_hca_vport_node_guid(dev, node_guid);
	if (port_type == MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET)
		return mlx5_query_nic_vport_node_guid(dev, node_guid);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_node_guid);
1778 
/* Port-type dispatch for the port GUID query. */
int mlx5_query_vport_port_guid(struct mlx5_core_dev *dev, u64 *port_guid)
{
	int port_type = MLX5_CAP_GEN(dev, port_type);

	if (port_type == MLX5_CMD_HCA_CAP_PORT_TYPE_IB)
		return mlx5_query_hca_vport_port_guid(dev, port_guid);
	if (port_type == MLX5_CMD_HCA_CAP_PORT_TYPE_ETHERNET)
		return mlx5_query_nic_vport_port_guid(dev, port_guid);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_port_guid);
1793 
/* Read hca_vport_context.vport_state of our own vport (port 1). */
int mlx5_query_hca_vport_state(struct mlx5_core_dev *dev, u8 *vport_state)
{
	int outlen = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	u32 *reply;
	int ret;

	reply = mlx5_vzalloc(outlen);
	if (reply == NULL)
		return -ENOMEM;

	ret = mlx5_query_hca_vport_context(dev, 1, 0, reply, outlen);
	if (ret == 0)
		*vport_state = MLX5_GET(query_hca_vport_context_out, reply,
		    hca_vport_context.vport_state);

	kvfree(reply);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_state);
1816 
/*
 * Read the InfiniBand port-counters group of the PPCNT register for
 * @port_num into @out.
 *
 * NOTE(review): the request mailbox is allocated with the caller's
 * reply size @sz and the same size is passed for both directions of
 * the register access — confirm sz covers the ppcnt_reg layout.
 */
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz)
{
	u32 *req;
	int ret;

	req = mlx5_vzalloc(sz);
	if (req == NULL)
		return -ENOMEM;

	MLX5_SET(ppcnt_reg, req, local_port, port_num);
	MLX5_SET(ppcnt_reg, req, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);

	ret = mlx5_core_access_reg(dev, req, sz, out, sz, MLX5_REG_PPCNT,
	    0, 0);

	kvfree(req);
	return ret;
}
1838