1 /*
2 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/export.h>
34 #include <linux/etherdevice.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/vport.h>
37 #include <linux/mlx5/eswitch.h>
38 #include "mlx5_core.h"
39 #include "eswitch.h"
40 #include "sf/sf.h"
41
42 /* Mutex to hold while enabling or disabling RoCE */
43 static DEFINE_MUTEX(mlx5_roce_en_lock);
44
/* Query the operational state of a vport.
 * Returns the state field from QUERY_VPORT_STATE, or 0 if the command fails.
 */
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	/* A non-zero vport number targets a vport other than our own. */
	if (vport)
		MLX5_SET(query_vport_state_in, in, other_vport, 1);

	if (mlx5_cmd_exec_inout(mdev, query_vport_state, in, out))
		return 0;

	return MLX5_GET(query_vport_state_out, out, state);
}
64
/* Read the administrative state of a vport into @admin_state.
 * Returns 0 on success or a command-execution error.
 */
static int mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
					u16 vport, u8 other_vport,
					u8 *admin_state)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
	int ret;

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, opmod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	MLX5_SET(query_vport_state_in, in, other_vport, other_vport);

	ret = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
	if (!ret)
		*admin_state = MLX5_GET(query_vport_state_out, out,
					admin_state);

	return ret;
}
86
/* Set the administrative state of a vport via MODIFY_VPORT_STATE. */
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
				  u16 vport, u8 other_vport, u8 state)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, admin_state, state);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);

	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
101
/* Program @max_tx_speed for a vport while preserving its current
 * administrative state across the modify command.
 */
int mlx5_modify_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 opmod,
				   u16 vport, u8 other_vport, u16 max_tx_speed)
{
	u32 in[MLX5_ST_SZ_DW(modify_vport_state_in)] = {};
	u8 cur_admin_state;
	int ret;

	/* Fetch the current admin state so it can be written back unchanged. */
	ret = mlx5_query_vport_admin_state(mdev, opmod, vport, other_vport,
					   &cur_admin_state);
	if (ret)
		return ret;

	MLX5_SET(modify_vport_state_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_VPORT_STATE);
	MLX5_SET(modify_vport_state_in, in, op_mod, opmod);
	MLX5_SET(modify_vport_state_in, in, vport_number, vport);
	MLX5_SET(modify_vport_state_in, in, other_vport, other_vport);
	MLX5_SET(modify_vport_state_in, in, admin_state, cur_admin_state);
	MLX5_SET(modify_vport_state_in, in, max_tx_speed, max_tx_speed);

	return mlx5_cmd_exec_in(mdev, modify_vport_state, in);
}
124
/* Read the max TX speed of a vport into @max_tx_speed.
 * A vport in the DOWN state reports a speed of zero.
 */
int mlx5_query_vport_max_tx_speed(struct mlx5_core_dev *mdev, u8 op_mod,
				  u16 vport, u8 other_vport, u32 *max_tx_speed)
{
	u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
	int ret;

	MLX5_SET(query_vport_state_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_STATE);
	MLX5_SET(query_vport_state_in, in, op_mod, op_mod);
	MLX5_SET(query_vport_state_in, in, vport_number, vport);
	MLX5_SET(query_vport_state_in, in, other_vport, other_vport);

	ret = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
	if (ret)
		return ret;

	if (MLX5_GET(query_vport_state_out, out, state) == VPORT_STATE_DOWN)
		*max_tx_speed = 0;
	else
		*max_tx_speed = MLX5_GET(query_vport_state_out, out,
					 max_tx_speed);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_vport_max_tx_speed);
153
/* Execute QUERY_NIC_VPORT_CONTEXT for @vport, placing the raw response
 * in @out (caller-sized to query_nic_vport_context_out).
 */
static int mlx5_query_nic_vport_context(struct mlx5_core_dev *mdev, u16 vport,
					bool other_vport, u32 *out)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};

	MLX5_SET(query_nic_vport_context_in, in, other_vport, other_vport);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);

	return mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
}
166
/* Read the per-vport minimum WQE inline mode into @min_inline. */
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				    u16 vport, u8 *min_inline)
{
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int err;

	err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
	if (err)
		return err;

	*min_inline = MLX5_GET(query_nic_vport_context_out, out,
			       nic_vport_context.min_wqe_inline_mode);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
180
/* Resolve the minimum WQE inline mode for this device.  Depending on the
 * wqe_inline_mode ETH capability, the mode is either read from the NIC
 * vport context or is a fixed value.
 */
void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
			   u8 *min_inline_mode)
{
	switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		/* Use the per-vport value if the query succeeds ... */
		if (!mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode))
			break;
		/* ... otherwise fall back to the L2 inline mode below. */
		fallthrough;
	case MLX5_CAP_INLINE_MODE_L2:
		*min_inline_mode = MLX5_INLINE_MODE_L2;
		break;
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		*min_inline_mode = MLX5_INLINE_MODE_NONE;
		break;
	}
}
EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
198
/* Set the minimum WQE inline mode of another vport (@vport). */
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
				     u16 vport, u8 min_inline)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				 nic_vport_context);

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.min_inline, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	MLX5_SET(nic_vport_context, ctx, min_wqe_inline_mode, min_inline);

	return mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
}
219
/* Read the permanent MAC address of a vport into @addr. */
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				     u16 vport, bool other, u8 *addr)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
	int err;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(query_nic_vport_context_in, in, other_vport, other);

	err = mlx5_cmd_exec_inout(mdev, query_nic_vport_context, in, out);
	if (!err) {
		u8 *perm = MLX5_ADDR_OF(query_nic_vport_context_out, out,
					nic_vport_context.permanent_address);

		/* The 6-byte MAC sits after a 2-byte offset in the field. */
		ether_addr_copy(addr, perm + 2);
	}

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_address);
243
/* Convenience wrapper: query the permanent MAC address of our own vport. */
int mlx5_query_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
{
	return mlx5_query_nic_vport_mac_address(mdev, 0, false, addr);
}
EXPORT_SYMBOL_GPL(mlx5_query_mac_address);
249
/* Set the permanent MAC address of another vport (@vport) to @addr. */
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
				      u16 vport, const u8 *addr)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *ctx;
	u8 *mac_field;
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.permanent_address, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	mac_field = MLX5_ADDR_OF(nic_vport_context, ctx, permanent_address);
	/* The 6-byte MAC sits after a 2-byte offset in the field. */
	ether_addr_copy(mac_field + 2, addr);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
284
/* Read the MTU of our own vport into @mtu. */
int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
{
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
	if (!err)
		*mtu = MLX5_GET(query_nic_vport_context_out, out,
				nic_vport_context.mtu);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
304
/* Set the MTU of our own vport. */
int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
{
	void *in;
	int err;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_nic_vport_context_in),
		      GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
	MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
326
/* Read the allowed UC or MC MAC list of a vport.
 * On entry *list_size holds the caller's buffer capacity (entries);
 * on success it is updated to the number of addresses actually returned,
 * which the firmware reports in allowed_list_size.  Requests larger than
 * the device limit are clamped with a warning.
 */
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
				  u16 vport,
				  enum mlx5_list_type list_type,
				  u8 addr_list[][ETH_ALEN],
				  int *list_size)
{
	u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {0};
	void *nic_vport_ctx;
	int max_list_size;
	int req_list_size;
	int out_sz;
	void *out;
	int err;
	int i;

	req_list_size = *list_size;

	/* Device limit depends on which list (UC vs MC) is being queried. */
	max_list_size = list_type == MLX5_NVPRT_LIST_TYPE_UC ?
			1 << MLX5_CAP_GEN(dev, log_max_current_uc_list) :
			1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (req_list_size > max_list_size) {
		mlx5_core_warn(dev, "Requested list size (%d) > (%d) max_list_size\n",
			       req_list_size, max_list_size);
		req_list_size = max_list_size;
	}

	/* Variable-length MAC entries follow the fixed output layout. */
	out_sz = MLX5_ST_SZ_BYTES(query_nic_vport_context_out) +
			req_list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
	MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
	MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
	/* On the ECPF even vport 0 must be addressed as "other vport". */
	if (vport || mlx5_core_is_ecpf(dev))
		MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto out;

	nic_vport_ctx = MLX5_ADDR_OF(query_nic_vport_context_out, out,
				     nic_vport_context);
	req_list_size = MLX5_GET(nic_vport_context, nic_vport_ctx,
				 allowed_list_size);

	*list_size = req_list_size;
	for (i = 0; i < req_list_size; i++) {
		/* +2 skips the padding before the 6-byte MAC; the same
		 * current_uc_mac_address layout is used for both list types.
		 */
		u8 *mac_addr = MLX5_ADDR_OF(nic_vport_context,
					    nic_vport_ctx,
					    current_uc_mac_address[i]) + 2;
		ether_addr_copy(addr_list[i], mac_addr);
	}
out:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mac_list);
389
/* Program the allowed UC or MC MAC list of our own vport.
 * Returns -ENOSPC when @list_size exceeds the device limit.
 */
int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev,
				   enum mlx5_list_type list_type,
				   u8 addr_list[][ETH_ALEN],
				   int list_size)
{
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *ctx;
	void *in;
	int max_size;
	int inlen;
	int err;
	int i;

	/* Device limit depends on which list (UC vs MC) is programmed. */
	if (list_type == MLX5_NVPRT_LIST_TYPE_UC)
		max_size = 1 << MLX5_CAP_GEN(dev, log_max_current_uc_list);
	else
		max_size = 1 << MLX5_CAP_GEN(dev, log_max_current_mc_list);

	if (list_size > max_size)
		return -ENOSPC;

	/* Variable-length MAC entries follow the fixed input layout. */
	inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(mac_address_layout);

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	MLX5_SET(nic_vport_context, ctx, allowed_list_type, list_type);
	MLX5_SET(nic_vport_context, ctx, allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* +2 skips the padding before the 6-byte MAC. */
		u8 *dst = MLX5_ADDR_OF(nic_vport_context, ctx,
				       current_uc_mac_address[i]) + 2;

		ether_addr_copy(dst, addr_list[i]);
	}

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_list);
442
/* Program the allowed-VLAN list of our own vport.
 * @vlans:     array of VLAN ids to allow
 * @list_size: number of entries in @vlans
 *
 * Returns 0 on success, -ENOSPC if @list_size exceeds the device limit,
 * -ENOMEM on allocation failure, or a command-execution error.
 */
int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
				u16 vlans[],
				int list_size)
{
	/* Zero-initialize at declaration, consistent with
	 * mlx5_modify_nic_vport_mac_list() (replaces the separate memset).
	 */
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {};
	void *nic_vport_ctx;
	int max_list_size;
	int in_sz;
	void *in;
	int err;
	int i;

	max_list_size = 1 << MLX5_CAP_GEN(dev, log_max_vlan_list);

	if (list_size > max_list_size)
		return -ENOSPC;

	/* Variable-length VLAN entries follow the fixed input layout. */
	in_sz = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in) +
		list_size * MLX5_ST_SZ_BYTES(vlan_layout);

	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.addresses_list, 1);

	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in,
				     nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_type, MLX5_NVPRT_LIST_TYPE_VLAN);
	MLX5_SET(nic_vport_context, nic_vport_ctx,
		 allowed_list_size, list_size);

	for (i = 0; i < list_size; i++) {
		/* VLAN entries share the address-list slots in the context. */
		void *vlan_addr = MLX5_ADDR_OF(nic_vport_context,
					       nic_vport_ctx,
					       current_uc_mac_address[i]);
		MLX5_SET(vlan_layout, vlan_addr, vlan, vlans[i]);
	}

	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_vlans);
493
/* Read the system image GUID from our own NIC vport context. */
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
					   u64 *system_image_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
	if (!err)
		*system_image_guid =
			MLX5_GET64(query_nic_vport_context_out, out,
				   nic_vport_context.system_image_guid);

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
516
/* Read the sd_group field from our own NIC vport context. */
int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
{
	u32 *out;
	int err;

	out = kvzalloc(MLX5_ST_SZ_BYTES(query_nic_vport_context_out),
		       GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
	if (!err)
		*sd_group = MLX5_GET(query_nic_vport_context_out, out,
				     nic_vport_context.sd_group);

	kvfree(out);
	return err;
}
537
/* Read the node GUID of a vport into @node_guid. */
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				   u16 vport, bool other_vport, u64 *node_guid)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, other_vport, out);
	if (!err)
		*node_guid = MLX5_GET64(query_nic_vport_context_out, out,
					nic_vport_context.node_guid);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
561
/* Set the node GUID of another vport (@vport).
 * Requires the vport_group_manager capability; returns -EACCES otherwise.
 */
int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
				    u16 vport, u64 node_guid)
{
	void *ctx;
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
		return -EACCES;

	in = kvzalloc(MLX5_ST_SZ_BYTES(modify_nic_vport_context_in),
		      GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in,
		 field_select.node_guid, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);

	ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, in, nic_vport_context);
	MLX5_SET64(nic_vport_context, ctx, node_guid, node_guid);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	kvfree(in);

	return err;
}
594
/* Read the Q_Key violation counter from our own NIC vport context. */
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
					u16 *qkey_viol_cntr)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, false, out);
	if (!err)
		*qkey_viol_cntr =
			MLX5_GET(query_nic_vport_context_out, out,
				 nic_vport_context.qkey_violation_counter);

	kvfree(out);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr);
618
/* Query a GID from an HCA vport's GID table.
 * @gid_index: table index to read, or 0xffff to request the whole table
 *             (output buffer is sized accordingly), though only the first
 *             returned entry is copied into @gid.
 * Querying another vport requires the vport_group_manager capability.
 */
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
			     u8 port_num, u16 vf_num, u16 gid_index,
			     union ib_gid *gid)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	union ib_gid *tmp;
	int tbsz;
	int nout;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	tbsz = mlx5_get_gid_table_len(MLX5_CAP_GEN(dev, gid_table_size));
	mlx5_core_dbg(dev, "vf_num %d, index %d, gid_table_size %d\n",
		      vf_num, gid_index, tbsz);

	if (gid_index > tbsz && gid_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "all entries"; size the output for the full table. */
	if (gid_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * sizeof(*gid);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_gid_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_GID);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_gid_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_gid_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_gid_in, in, gid_index, gid_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_gid_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* GID entries are appended after the fixed-size output struct. */
	tmp = out + MLX5_ST_SZ_BYTES(query_hca_vport_gid_out);
	gid->global.subnet_prefix = tmp->global.subnet_prefix;
	gid->global.interface_id = tmp->global.interface_id;

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_gid);
684
/* Query P_Key(s) from an HCA vport's P_Key table.
 * @pkey_index: table index to read, or 0xffff to read the whole table, in
 *              which case @pkey must have room for the full table.
 * Querying another vport requires the vport_group_manager capability.
 */
int mlx5_query_hca_vport_pkey(struct mlx5_core_dev *dev, u8 other_vport,
			      u8 port_num, u16 vf_num, u16 pkey_index,
			      u16 *pkey)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_in);
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_pkey_out);
	int is_group_manager;
	void *out = NULL;
	void *in = NULL;
	void *pkarr;
	int nout;
	int tbsz;
	int err;
	int i;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	tbsz = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size));
	if (pkey_index > tbsz && pkey_index != 0xffff)
		return -EINVAL;

	/* 0xffff means "all entries"; size the output for the full table. */
	if (pkey_index == 0xffff)
		nout = tbsz;
	else
		nout = 1;

	out_sz += nout * MLX5_ST_SZ_BYTES(pkey);

	in = kvzalloc(in_sz, GFP_KERNEL);
	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_hca_vport_pkey_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_pkey_in, in, vport_number, vf_num);
			MLX5_SET(query_hca_vport_pkey_in, in, other_vport, 1);
		} else {
			err = -EPERM;
			goto out;
		}
	}
	MLX5_SET(query_hca_vport_pkey_in, in, pkey_index, pkey_index);

	/* port_num is only meaningful on dual-port devices. */
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_pkey_in, in, port_num, port_num);

	err = mlx5_cmd_exec(dev, in, in_sz, out, out_sz);
	if (err)
		goto out;

	/* Copy each returned entry into the caller's array. */
	pkarr = MLX5_ADDR_OF(query_hca_vport_pkey_out, out, pkey);
	for (i = 0; i < nout; i++, pkey++, pkarr += MLX5_ST_SZ_BYTES(pkey))
		*pkey = MLX5_GET_PR(pkey, pkarr, pkey);

out:
	kvfree(in);
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_pkey);
749
/* Query the HCA vport context and unpack it into @rep.
 * Querying another vport requires the vport_group_manager capability
 * (-EPERM otherwise).  port_num is only applied on dual-port devices.
 */
int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
				 u8 other_vport, u8 port_num,
				 u16 vf_num,
				 struct mlx5_hca_vport_context *rep)
{
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_vport_context_out);
	/* u32 command buffer, consistent with every other MLX5_ST_SZ_DW
	 * array in this file (was declared as int).
	 */
	u32 in[MLX5_ST_SZ_DW(query_hca_vport_context_in)] = {};
	int is_group_manager;
	void *out;
	void *ctx;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);

	out = kvzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_vport_context_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT);

	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(query_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(query_hca_vport_context_in, in, vport_number, vf_num);
		} else {
			err = -EPERM;
			goto ex;
		}
	}

	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_hca_vport_context_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_hca_vport_context, in, out);
	if (err)
		goto ex;

	/* Unpack the returned context field by field into @rep. */
	ctx = MLX5_ADDR_OF(query_hca_vport_context_out, out, hca_vport_context);
	rep->field_select = MLX5_GET_PR(hca_vport_context, ctx, field_select);
	rep->sm_virt_aware = MLX5_GET_PR(hca_vport_context, ctx, sm_virt_aware);
	rep->has_smi = MLX5_GET_PR(hca_vport_context, ctx, has_smi);
	rep->has_raw = MLX5_GET_PR(hca_vport_context, ctx, has_raw);
	rep->policy = MLX5_GET_PR(hca_vport_context, ctx, vport_state_policy);
	rep->phys_state = MLX5_GET_PR(hca_vport_context, ctx,
				      port_physical_state);
	rep->vport_state = MLX5_GET_PR(hca_vport_context, ctx, vport_state);
	rep->port_physical_state = MLX5_GET_PR(hca_vport_context, ctx,
					       port_physical_state);
	rep->port_guid = MLX5_GET64_PR(hca_vport_context, ctx, port_guid);
	rep->node_guid = MLX5_GET64_PR(hca_vport_context, ctx, node_guid);
	rep->cap_mask1 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask1);
	rep->cap_mask1_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask1_field_select);
	rep->cap_mask2 = MLX5_GET_PR(hca_vport_context, ctx, cap_mask2);
	rep->cap_mask2_perm = MLX5_GET_PR(hca_vport_context, ctx,
					  cap_mask2_field_select);
	rep->lid = MLX5_GET_PR(hca_vport_context, ctx, lid);
	rep->init_type_reply = MLX5_GET_PR(hca_vport_context, ctx,
					   init_type_reply);
	rep->lmc = MLX5_GET_PR(hca_vport_context, ctx, lmc);
	rep->subnet_timeout = MLX5_GET_PR(hca_vport_context, ctx,
					  subnet_timeout);
	rep->sm_lid = MLX5_GET_PR(hca_vport_context, ctx, sm_lid);
	rep->sm_sl = MLX5_GET_PR(hca_vport_context, ctx, sm_sl);
	rep->qkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  qkey_violation_counter);
	rep->pkey_violation_counter = MLX5_GET_PR(hca_vport_context, ctx,
						  pkey_violation_counter);
	rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
	rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
					    system_image_guid);
	rep->num_plane = MLX5_GET_PR(hca_vport_context, ctx, num_port_plane);

ex:
	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_context);
828
/* Read the system image GUID via the HCA vport context. */
int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev,
					   u64 *sys_image_guid)
{
	struct mlx5_hca_vport_context *ctx;
	int err;

	ctx = kvzalloc_obj(*ctx);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, ctx);
	if (!err)
		*sys_image_guid = ctx->sys_image_guid;

	kvfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_system_image_guid);
847
/* Read the node GUID via the HCA vport context. */
int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev,
				   u64 *node_guid)
{
	struct mlx5_hca_vport_context *ctx;
	int err;

	ctx = kvzalloc_obj(*ctx);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_hca_vport_context(dev, 0, 1, 0, ctx);
	if (!err)
		*node_guid = ctx->node_guid;

	kvfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_hca_vport_node_guid);
866
/* Read the three promiscuous-mode flags of a vport. */
int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev,
				 u16 vport,
				 int *promisc_uc,
				 int *promisc_mc,
				 int *promisc_all)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, vport, vport > 0, out);
	if (!err) {
		*promisc_uc = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.promisc_uc);
		*promisc_mc = MLX5_GET(query_nic_vport_context_out, out,
				       nic_vport_context.promisc_mc);
		*promisc_all = MLX5_GET(query_nic_vport_context_out, out,
					nic_vport_context.promisc_all);
	}

	kvfree(out);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_promisc);
897
mlx5_modify_nic_vport_promisc(struct mlx5_core_dev * mdev,int promisc_uc,int promisc_mc,int promisc_all)898 int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev,
899 int promisc_uc,
900 int promisc_mc,
901 int promisc_all)
902 {
903 void *in;
904 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
905 int err;
906
907 in = kvzalloc(inlen, GFP_KERNEL);
908 if (!in)
909 return -ENOMEM;
910
911 MLX5_SET(modify_nic_vport_context_in, in, field_select.promisc, 1);
912 MLX5_SET(modify_nic_vport_context_in, in,
913 nic_vport_context.promisc_uc, promisc_uc);
914 MLX5_SET(modify_nic_vport_context_in, in,
915 nic_vport_context.promisc_mc, promisc_mc);
916 MLX5_SET(modify_nic_vport_context_in, in,
917 nic_vport_context.promisc_all, promisc_all);
918 MLX5_SET(modify_nic_vport_context_in, in, opcode,
919 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
920
921 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
922
923 kvfree(in);
924
925 return err;
926 }
927 EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_promisc);
928
/* Bit positions used when accumulating the per-direction loopback-disable
 * flags in mlx5_nic_vport_query_local_lb().
 */
enum {
	UC_LOCAL_LB,
	MC_LOCAL_LB
};
933
/* Enable or disable local loopback (unicast and multicast) on the NIC
 * vport.  Only directions the device can control — per the
 * disable_local_lb_{uc,mc} capabilities — are selected for modification;
 * if neither is supported the function is a no-op returning 0.
 */
int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	if (!MLX5_CAP_GEN(mdev, disable_local_lb_mc) &&
	    !MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		return 0;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	/* The context fields are "disable" flags, hence the inversion. */
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_mc_local_lb, !enable);
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.disable_uc_local_lb, !enable);

	/* Select each field for update only when the device supports it. */
	if (MLX5_CAP_GEN(mdev, disable_local_lb_mc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_mc_local_lb, 1);

	if (MLX5_CAP_GEN(mdev, disable_local_lb_uc))
		MLX5_SET(modify_nic_vport_context_in, in,
			 field_select.disable_uc_local_lb, 1);
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);

	if (!err)
		mlx5_core_dbg(mdev, "%s local_lb\n",
			      enable ? "enable" : "disable");

	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_update_local_lb);
973
/* Report whether local loopback is currently enabled on the NIC vport.
 * *status is true only when neither UC nor MC loopback is disabled.
 */
int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status)
{
	int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
	int disabled;
	u32 *ctx;
	int err;

	ctx = kvzalloc(outlen, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	err = mlx5_query_nic_vport_context(mdev, 0, false, ctx);
	if (err)
		goto done;

	/* Pack both "disable" flags; loopback is on iff neither is set. */
	disabled = MLX5_GET(query_nic_vport_context_out, ctx,
			    nic_vport_context.disable_uc_local_lb) << UC_LOCAL_LB;
	disabled |= MLX5_GET(query_nic_vport_context_out, ctx,
			     nic_vport_context.disable_mc_local_lb) << MC_LOCAL_LB;

	*status = !disabled;

done:
	kvfree(ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_query_local_lb);
1002
/* Value written to the nic_vport_context.roce_en field by
 * mlx5_nic_vport_update_roce_state().
 */
enum mlx5_vport_roce_state {
	MLX5_VPORT_ROCE_DISABLED = 0,
	MLX5_VPORT_ROCE_ENABLED  = 1,
};
1007
mlx5_nic_vport_update_roce_state(struct mlx5_core_dev * mdev,enum mlx5_vport_roce_state state)1008 static int mlx5_nic_vport_update_roce_state(struct mlx5_core_dev *mdev,
1009 enum mlx5_vport_roce_state state)
1010 {
1011 void *in;
1012 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1013 int err;
1014
1015 in = kvzalloc(inlen, GFP_KERNEL);
1016 if (!in)
1017 return -ENOMEM;
1018
1019 MLX5_SET(modify_nic_vport_context_in, in, field_select.roce_en, 1);
1020 MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.roce_en,
1021 state);
1022 MLX5_SET(modify_nic_vport_context_in, in, opcode,
1023 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1024
1025 err = mlx5_cmd_exec_in(mdev, modify_nic_vport_context, in);
1026
1027 kvfree(in);
1028
1029 return err;
1030 }
1031
mlx5_nic_vport_enable_roce(struct mlx5_core_dev * mdev)1032 int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev)
1033 {
1034 int err = 0;
1035
1036 mutex_lock(&mlx5_roce_en_lock);
1037 if (!mdev->roce.roce_en)
1038 err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_ENABLED);
1039
1040 if (!err)
1041 mdev->roce.roce_en++;
1042 mutex_unlock(&mlx5_roce_en_lock);
1043
1044 return err;
1045 }
1046 EXPORT_SYMBOL_GPL(mlx5_nic_vport_enable_roce);
1047
mlx5_nic_vport_disable_roce(struct mlx5_core_dev * mdev)1048 int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev)
1049 {
1050 int err = 0;
1051
1052 mutex_lock(&mlx5_roce_en_lock);
1053 if (mdev->roce.roce_en) {
1054 mdev->roce.roce_en--;
1055 if (mdev->roce.roce_en == 0)
1056 err = mlx5_nic_vport_update_roce_state(mdev, MLX5_VPORT_ROCE_DISABLED);
1057
1058 if (err)
1059 mdev->roce.roce_en++;
1060 }
1061 mutex_unlock(&mlx5_roce_en_lock);
1062 return err;
1063 }
1064 EXPORT_SYMBOL(mlx5_nic_vport_disable_roce);
1065
/**
 * mlx5_core_query_vport_counter - read a vport's firmware counter set
 * @dev:         mlx5 core device
 * @other_vport: when non-zero, query VF @vf's vport instead of our own
 *               (requires the vport_group_manager capability)
 * @vf:          VF index; the vport number queried is vf + 1
 * @port_num:    physical port to target on dual-port devices
 * @out:         caller buffer laid out as query_vport_counter_out
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EPERM when
 * querying another vport without being group manager, or the command
 * execution error.
 */
int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport,
				  int vf, u8 port_num, void *out)
{
	int in_sz = MLX5_ST_SZ_BYTES(query_vport_counter_in);
	int is_group_manager;
	void *in;
	int err;

	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	if (other_vport) {
		if (!is_group_manager) {
			err = -EPERM;
			goto free;
		}
		MLX5_SET(query_vport_counter_in, in, other_vport, 1);
		/* VF vport numbers start at 1; vport 0 is the PF itself. */
		MLX5_SET(query_vport_counter_in, in, vport_number, vf + 1);
	}
	if (MLX5_CAP_GEN(dev, num_ports) == 2)
		MLX5_SET(query_vport_counter_in, in, port_num, port_num);

	err = mlx5_cmd_exec_inout(dev, query_vport_counter, in, out);
free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_query_vport_counter);
1101
/* Read the per-vport "discarded while vport down" RX/TX counters from
 * the vNIC environment. @other_vport selects whether @vport refers to
 * another function's vport.
 */
int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport,
				u8 other_vport, u64 *rx_discard_vport_down,
				u64 *tx_discard_vport_down)
{
	u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};
	u32 out[MLX5_ST_SZ_DW(query_vnic_env_out)] = {};
	int err;

	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV);
	MLX5_SET(query_vnic_env_in, in, op_mod, 0);
	MLX5_SET(query_vnic_env_in, in, vport_number, vport);
	MLX5_SET(query_vnic_env_in, in, other_vport, other_vport);

	err = mlx5_cmd_exec_inout(mdev, query_vnic_env, in, out);
	if (err)
		return err;

	*rx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.receive_discard_vport_down);
	*tx_discard_vport_down = MLX5_GET64(query_vnic_env_out, out,
					    vport_env.transmit_discard_vport_down);
	return 0;
}
1126
/* Modify an HCA vport context (IB side). Fields are applied only when
 * selected in req->field_select; cap_mask1 / cap_mask1_field_select are
 * always written. Targeting another vport (@other_vport) requires the
 * vport_group_manager capability, otherwise -EPERM is returned.
 */
int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev,
				       u8 other_vport, u8 port_num,
				       int vf,
				       struct mlx5_hca_vport_context *req)
{
	int in_sz = MLX5_ST_SZ_BYTES(modify_hca_vport_context_in);
	int is_group_manager;
	void *ctx;
	void *in;
	int err;

	mlx5_core_dbg(dev, "vf %d\n", vf);
	is_group_manager = MLX5_CAP_GEN(dev, vport_group_manager);
	in = kvzalloc(in_sz, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_hca_vport_context_in, in, opcode, MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT);
	if (other_vport) {
		if (is_group_manager) {
			MLX5_SET(modify_hca_vport_context_in, in, other_vport, 1);
			MLX5_SET(modify_hca_vport_context_in, in, vport_number, vf);
		} else {
			/* only the group manager may touch other vports */
			err = -EPERM;
			goto ex;
		}
	}

	/* port_num is only meaningful on multi-port devices */
	if (MLX5_CAP_GEN(dev, num_ports) > 1)
		MLX5_SET(modify_hca_vport_context_in, in, port_num, port_num);

	ctx = MLX5_ADDR_OF(modify_hca_vport_context_in, in, hca_vport_context);
	MLX5_SET(hca_vport_context, ctx, field_select, req->field_select);
	/* copy each requested field only when its select bit is set */
	if (req->field_select & MLX5_HCA_VPORT_SEL_STATE_POLICY)
		MLX5_SET(hca_vport_context, ctx, vport_state_policy,
			 req->policy);
	if (req->field_select & MLX5_HCA_VPORT_SEL_PORT_GUID)
		MLX5_SET64(hca_vport_context, ctx, port_guid, req->port_guid);
	if (req->field_select & MLX5_HCA_VPORT_SEL_NODE_GUID)
		MLX5_SET64(hca_vport_context, ctx, node_guid, req->node_guid);
	MLX5_SET(hca_vport_context, ctx, cap_mask1, req->cap_mask1);
	MLX5_SET(hca_vport_context, ctx, cap_mask1_field_select,
		 req->cap_mask1_perm);
	err = mlx5_cmd_exec_in(dev, modify_hca_vport_context, in);
ex:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1175 EXPORT_SYMBOL_GPL(mlx5_core_modify_hca_vport_context);
1176
/* Affiliate @port_mdev's NIC vport with @master_mdev for multiport
 * (LAG) operation. RoCE must be enabled on the port device first; if
 * the affiliation command fails, that RoCE reference is dropped again
 * so the function has no net side effect on failure.
 */
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
				       struct mlx5_core_dev *port_mdev)
{
	int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
	void *in;
	int err;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	err = mlx5_nic_vport_enable_roce(port_mdev);
	if (err)
		goto free;

	MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
	/* prefer the SW-assigned vhca_id when the device supports it */
	if (MLX5_CAP_GEN_2(master_mdev, sw_vhca_id_valid)) {
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.vhca_id_type, VHCA_ID_TYPE_SW);
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.affiliated_vhca_id,
			 MLX5_CAP_GEN_2(master_mdev, sw_vhca_id));
	} else {
		MLX5_SET(modify_nic_vport_context_in, in,
			 nic_vport_context.affiliated_vhca_id,
			 MLX5_CAP_GEN(master_mdev, vhca_id));
	}
	MLX5_SET(modify_nic_vport_context_in, in,
		 nic_vport_context.affiliation_criteria,
		 MLX5_CAP_GEN(port_mdev, affiliate_nic_vport_criteria));
	MLX5_SET(modify_nic_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);

	err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
	if (err)
		mlx5_nic_vport_disable_roce(port_mdev);

free:
	kvfree(in);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_affiliate_multiport);
1219
mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev * port_mdev)1220 int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
1221 {
1222 int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
1223 void *in;
1224 int err;
1225
1226 in = kvzalloc(inlen, GFP_KERNEL);
1227 if (!in)
1228 return -ENOMEM;
1229
1230 MLX5_SET(modify_nic_vport_context_in, in, field_select.affiliation, 1);
1231 MLX5_SET(modify_nic_vport_context_in, in,
1232 nic_vport_context.affiliated_vhca_id, 0);
1233 MLX5_SET(modify_nic_vport_context_in, in,
1234 nic_vport_context.affiliation_criteria, 0);
1235 MLX5_SET(modify_nic_vport_context_in, in, opcode,
1236 MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
1237
1238 err = mlx5_cmd_exec_in(port_mdev, modify_nic_vport_context, in);
1239 if (!err)
1240 mlx5_nic_vport_disable_roce(port_mdev);
1241
1242 kvfree(in);
1243 return err;
1244 }
1245 EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
1246
mlx5_query_nic_system_image_guid(struct mlx5_core_dev * mdev)1247 u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
1248 {
1249 int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
1250 u64 tmp;
1251 int err;
1252
1253 if (mdev->sys_image_guid)
1254 return mdev->sys_image_guid;
1255
1256 if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
1257 err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
1258 else
1259 err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
1260
1261 mdev->sys_image_guid = err ? 0 : tmp;
1262
1263 return mdev->sys_image_guid;
1264 }
1265 EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
1266
/* Build the software system-image identifier in @buf and report its
 * length in @len: the raw 8-byte firmware GUID, optionally followed by
 * one load_balance_id byte when both related caps are present. *len is
 * 0 when the firmware GUID is unavailable.
 */
void mlx5_query_nic_sw_system_image_guid(struct mlx5_core_dev *mdev, u8 *buf,
					 u8 *len)
{
	u64 guid = mlx5_query_nic_system_image_guid(mdev);

	*len = 0;
	if (!guid)
		return;

	memcpy(buf, &guid, sizeof(guid));
	*len = sizeof(guid);

	if (MLX5_CAP_GEN_2(mdev, load_balance_id) &&
	    MLX5_CAP_GEN_2(mdev, lag_per_mp_group)) {
		buf[*len] = MLX5_CAP_GEN_2(mdev, load_balance_id);
		(*len)++;
	}
}
1285
mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev * dev,u16 vport_num,u16 * vhca_id)1286 static bool mlx5_vport_use_vhca_id_as_func_id(struct mlx5_core_dev *dev,
1287 u16 vport_num, u16 *vhca_id)
1288 {
1289 if (!MLX5_CAP_GEN_2(dev, function_id_type_vhca_id))
1290 return false;
1291
1292 return mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport_num, vhca_id);
1293 }
1294
/* Query another function's HCA capabilities (max values) on behalf of
 * @vport. @opmod selects the capability type; it is shifted into the
 * high bits of op_mod with HCA_CAP_OPMOD_GET_MAX in bit 0. @out must be
 * laid out as query_hca_cap_out.
 */
int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
				  u16 opmod)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
	u16 vhca_id = 0, function_id = 0;
	bool ec_vf_func = false;

	/* if this vport is referring to a vport on the ec PF (embedded cpu )
	 * let the FW know which domain we are querying since vport numbers or
	 * function_ids are not unique across the different PF domains,
	 * unless we use vhca_id as the function_id below.
	 */
	ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
	function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);

	/* vhca_id addressing is globally unique, so it overrides the
	 * ec_vf domain selection above when supported.
	 */
	if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
		MLX5_SET(query_hca_cap_in, in, function_id_type, 1);
		function_id = vhca_id;
		ec_vf_func = false;
		mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
			      __func__, vport, vhca_id);
	}

	opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	MLX5_SET(query_hca_cap_in, in, other_function, true);
	MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
	MLX5_SET(query_hca_cap_in, in, function_id, function_id);
	return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
1327
/* Resolve the vhca_id of @vport, preferring the cached eswitch mapping
 * and falling back to a QUERY_HCA_CAP of the other function.
 */
int mlx5_vport_get_vhca_id(struct mlx5_core_dev *dev, u16 vport, u16 *vhca_id)
{
	void *query_ctx;
	void *caps;
	int err;

	/* Fast path: the eswitch may already know the mapping. */
	if (mlx5_esw_vport_vhca_id(dev->priv.eswitch, vport, vhca_id))
		return 0;

	query_ctx = kzalloc(MLX5_ST_SZ_BYTES(query_hca_cap_out), GFP_KERNEL);
	if (!query_ctx)
		return -ENOMEM;

	err = mlx5_vport_get_other_func_general_cap(dev, vport, query_ctx);
	if (!err) {
		caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
		*vhca_id = MLX5_GET(cmd_hca_cap, caps, vhca_id);
	}

	kfree(query_ctx);
	return err;
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_vhca_id);
1355
/* Set another function's HCA capabilities on behalf of @vport. @hca_cap
 * must be a cmd_hca_cap-sized capability blob; @opmod selects the
 * capability type (shifted into the high bits of op_mod, "set current").
 * Mirrors the addressing logic of mlx5_vport_get_other_func_cap().
 */
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
				  u16 vport, u16 opmod)
{
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	u16 vhca_id = 0, function_id = 0;
	bool ec_vf_func = false;
	void *set_hca_cap;
	void *set_ctx;
	int ret;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	/* if this vport is referring to a vport on the ec PF (embedded cpu )
	 * let the FW know which domain we are querying since vport numbers or
	 * function_ids are not unique across the different PF domains,
	 * unless we use vhca_id as the function_id below.
	 */
	ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
	function_id = mlx5_vport_to_func_id(dev, vport, ec_vf_func);

	/* vhca_id addressing is globally unique, so it overrides the
	 * ec_vf domain selection above when supported.
	 */
	if (mlx5_vport_use_vhca_id_as_func_id(dev, vport, &vhca_id)) {
		MLX5_SET(set_hca_cap_in, set_ctx, function_id_type, 1);
		function_id = vhca_id;
		ec_vf_func = false;
		mlx5_core_dbg(dev, "%s using vhca_id as function_id for vport %d vhca_id 0x%x\n",
			      __func__, vport, vhca_id);
	}

	MLX5_SET(set_hca_cap_in, set_ctx, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
	memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
	MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
	MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
	MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
	ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);

	kfree(set_ctx);
	return ret;
}
1398