/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <net/devlink.h>

#include <uapi/rdma/mlx4-abi.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include <rdma/ib_verbs.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level; /* 0 by default */
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number of MSI-X irqs to msi_x");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");
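
/*
 * Illustrative usage (not part of this file; all counts are examples
 * only): both arrays above are parsed as comma-separated per-port
 * counts in the order port1,port2,port1+2, so a load line such as
 *
 *   modprobe mlx4_core num_vfs=2,2,4 probe_vf=1,1,2
 *
 * would request two single-port VFs on each port plus four dual-port
 * VFs, with the PF driver probing one VF per port and two of the
 * dual-port VFs.
 */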

static int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log2 of the MGM entry size, which defines"
		 " the number of QPs per MCG; for example,"
		 " 10 gives 248. Range: 7 <="
		 " log_num_mgm_entry_size <= 12."
		 " To activate device managed"
		 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

static bool enable_4k_uar;
module_param(enable_4k_uar, bool, 0444);
MODULE_PARM_DESC(enable_4k_uar,
		 "Enable using 4K UAR. Should not be enabled if have VFs which do not support 4K UARs (default: false)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION "\n";

static const struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static const struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(1);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment "
		 "(0-7) (default: 0)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
		 "1 for IB, 2 for Ethernet");

static atomic_t pf_loading = ATOMIC_INIT(0);

static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
				       struct devlink_param_gset_ctx *ctx,
				       struct netlink_ext_ack *extack)
{
	ctx->val.vbool = !!mlx4_internal_err_reset;
	return 0;
}

static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
				       struct devlink_param_gset_ctx *ctx,
				       struct netlink_ext_ack *extack)
{
	mlx4_internal_err_reset = ctx->val.vbool;
	return 0;
}

static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx,
					    struct netlink_ext_ack *extack)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;

	ctx->val.vbool = dev->persist->crdump.snapshot_enable;
	return 0;
}

static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx,
					    struct netlink_ext_ack *extack)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;

	dev->persist->crdump.snapshot_enable = ctx->val.vbool;
	return 0;
}

static int
mlx4_devlink_max_macs_validate(struct devlink *devlink, u32 id,
			       union devlink_param_value val,
			       struct netlink_ext_ack *extack)
{
	u32 value = val.vu32;

	if (value < 1 || value > 128)
		return -ERANGE;

	if (!is_power_of_2(value)) {
		NL_SET_ERR_MSG_MOD(extack, "max_macs supported must be power of 2");
		return -EINVAL;
	}

	return 0;
}

enum mlx4_devlink_param_id {
	MLX4_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
	MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
};

static const struct devlink_param mlx4_devlink_params[] = {
	DEVLINK_PARAM_GENERIC(INT_ERR_RESET,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      mlx4_devlink_ierr_reset_get,
			      mlx4_devlink_ierr_reset_set, NULL),
	DEVLINK_PARAM_GENERIC(MAX_MACS,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx4_devlink_max_macs_validate),
	DEVLINK_PARAM_GENERIC(REGION_SNAPSHOT,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME) |
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      mlx4_devlink_crdump_snapshot_get,
			      mlx4_devlink_crdump_snapshot_set, NULL),
	DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
			     "enable_64b_cqe_eqe", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL, NULL),
	DEVLINK_PARAM_DRIVER(MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
			     "enable_4k_uar", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			     NULL, NULL, NULL),
};

static void mlx4_devlink_set_params_init_values(struct devlink *devlink)
{
	union devlink_param_value value;

	value.vbool = !!mlx4_internal_err_reset;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
					value);

	value.vu32 = 1UL << log_num_mac;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					value);

	value.vbool = enable_64b_cqe_eqe;
	devl_param_driverinit_value_set(devlink,
					MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
					value);

	value.vbool = enable_4k_uar;
	devl_param_driverinit_value_set(devlink,
					MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
					value);

	value.vbool = false;
	devl_param_driverinit_value_set(devlink,
					DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
					value);
}
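
/*
 * Illustrative usage (the PCI address below is an example): the
 * driverinit defaults seeded above can be read back and overridden
 * from userspace with the devlink tool, taking effect on the next
 * reload:
 *
 *   devlink dev param show pci/0000:03:00.0 name enable_4k_uar
 *   devlink dev param set pci/0000:03:00.0 name enable_4k_uar \
 *           value true cmode driverinit
 *   devlink dev reload pci/0000:03:00.0
 */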

static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* reserved_uars is counted in units of the system page size.
	 * Therefore an adjustment is applied when the UAR page size is
	 * smaller than the system page size.
	 */
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
		      (1 << (PAGE_SHIFT - dev->uar_page_shift)));
}
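
/*
 * Worked example with hypothetical numbers: on a 64KB-page system
 * (PAGE_SHIFT = 16) running with 4KB UARs (uar_page_shift = 12),
 * sixteen UAR pages fit in one system page, so a firmware report of
 * dev_cap->reserved_uars = 32 is scaled down to 32 / (1 << 4) = 2
 * system-page units, subject to the mlx4_get_num_reserved_uar() floor.
 */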

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EOPNOTSUPP;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.max_tc_eth = port_cap->max_tc_eth;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.map_clock_to_user = dev_cap->map_clock_to_user;
	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE to enable resizing the CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
				   dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
			      dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
	dev->caps.wol_port[1] = dev_cap->wol_port[1];
	dev->caps.wol_port[2] = dev_cap->wol_port[2];
	dev->caps.health_buffer_addrs = dev_cap->health_buffer_addrs;

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* Virtual PCI function needs to determine UAR page size from
		 * firmware. Only master PCI function can set the uar page size
		 */
		if (enable_4k_uar || !dev->persist->num_vfs)
			dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		else
			dev->uar_page_shift = PAGE_SHIFT;

		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and
		 * verifies that the phv bit was reported correctly in the
		 * wqe. To allow QinQ, the PHV_EN flag should be set and
		 * phv_check_en must be cleared, otherwise QinQ packets
		 * will be dropped by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ? MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

/* Check whether any VFs are still live and return their count. */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);
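
/*
 * Worked example with hypothetical QPN bases: if base_proxy_sqpn = 0x100
 * and base_tunnel_sqpn = 0x200, a proxy QPN of 0x105 yields
 * MLX4_RESERVED_QKEY_BASE + 5 and a tunnel QPN of 0x203 yields
 * MLX4_RESERVED_QKEY_BASE + 3, so every reserved QP gets a distinct,
 * well-known QKEY derived from its offset within its range.
 */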

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
				       struct _rule_hw *eth_header)
{
	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
		struct mlx4_net_trans_rule_hw_eth *eth =
			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
			next_rule->rsvd == 0;

		if (last_rule)
			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
	}
}
EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static void mlx4_slave_destroy_special_qp_cap(struct mlx4_dev *dev)
{
	kfree(dev->caps.spec_qps);
	dev->caps.spec_qps = NULL;
}

static int mlx4_slave_special_qp_cap(struct mlx4_dev *dev)
{
	struct mlx4_func_cap *func_cap;
	struct mlx4_caps *caps = &dev->caps;
	int i, err = 0;

	func_cap = kzalloc_obj(*func_cap);
	caps->spec_qps = kzalloc_objs(*caps->spec_qps, caps->num_ports);

	if (!func_cap || !caps->spec_qps) {
		mlx4_err(dev, "Failed to allocate memory for special qps cap\n");
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= caps->num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		caps->spec_qps[i - 1] = func_cap->spec_qps;
		caps->port_mask[i] = caps->port_type[i];
		caps->phys_port_id[i] = func_cap->phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &caps->gid_table_len[i],
						      &caps->pkey_table_len[i]);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
	}

err_mem:
	if (err)
		mlx4_slave_destroy_special_qp_cap(dev);
	kfree(func_cap);
	return err;
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap *dev_cap;
	struct mlx4_func_cap *func_cap;
	struct mlx4_init_hca_param *hca_param;

	hca_param = kzalloc_obj(*hca_param);
	func_cap = kzalloc_obj(*func_cap);
	dev_cap = kzalloc_obj(*dev_cap);
	if (!hca_param || !func_cap || !dev_cap) {
		mlx4_err(dev, "Failed to allocate memory for slave_cap\n");
		err = -ENOMEM;
		goto free_mem;
	}

	err = mlx4_QUERY_HCA(dev, hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		goto free_mem;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should be always zeroed
	 */
	if (hca_param->global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		err = -EINVAL;
		goto free_mem;
	}

	dev->caps.hca_core_clock = hca_param->hca_core_clock;

	dev->caps.max_qp_dest_rdma = 1 << hca_param->log_rd_per_qp;
	err = mlx4_dev_cap(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		goto free_mem;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		err = -ENODEV;
		goto free_mem;
	}

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param->uar_page_sz + 12;

	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
		err = -ENODEV;
		goto free_mem;
	}

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, dev_cap);

	/* Although uar page size in FW differs from system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still works with assumption that uar page size == system page size
	 */
	dev->caps.uar_page_size = PAGE_SIZE;

	err = mlx4_QUERY_FUNC_CAP(dev, 0, func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		goto free_mem;
	}

	if ((func_cap->pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap->pf_context_behaviour,
			 PF_CONTEXT_BEHAVIOUR_MASK);
		err = -EINVAL;
		goto free_mem;
	}

	dev->caps.num_ports = func_cap->num_ports;
	dev->quotas.qp = func_cap->qp_quota;
	dev->quotas.srq = func_cap->srq_quota;
	dev->quotas.cq = func_cap->cq_quota;
	dev->quotas.mpt = func_cap->mpt_quota;
	dev->quotas.mtt = func_cap->mtt_quota;
	dev->caps.num_qps = 1 << hca_param->log_num_qps;
	dev->caps.num_srqs = 1 << hca_param->log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param->log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param->log_mpt_sz;
	dev->caps.num_eqs = func_cap->max_eq;
	dev->caps.reserved_eqs = func_cap->reserved_eq;
	dev->caps.reserved_lkey = func_cap->reserved_lkey;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		err = -ENODEV;
		goto free_mem;
	}

	mlx4_replace_zero_macs(dev);

	err = mlx4_slave_special_qp_cap(dev);
	if (err) {
		mlx4_err(dev, "Set special QP caps failed. aborting\n");
		goto free_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
	    pci_resource_len(dev->persist->pdev,
			     2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param->eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param->dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param->cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_USER_MAC_EN;
	mlx4_dbg(dev, "User MAC FW update is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, dev_cap, hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param->rss_ip_frags ? "on" : "off");

	if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap->extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

err_mem:
	if (err)
		mlx4_slave_destroy_special_qp_cap(dev);
free_mem:
	kfree(hca_param);
	kfree(func_cap);
	kfree(dev_cap);
	return err;
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static int __set_port_type(struct mlx4_port_info *info,
			   enum mlx4_port_type port_type)
{
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	if ((port_type & mdev->caps.supported_type[info->port]) != port_type) {
		mlx4_err(mdev,
			 "Requested port type for port %d is not supported on this HCA\n",
			 info->port);
		return -EOPNOTSUPP;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	info->tmp_type = port_type;

	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EOPNOTSUPP;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);

	return err;
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	enum mlx4_port_type port_type;
	static DEFINE_MUTEX(set_port_type_mutex);
	int err;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n")) {
		port_type = MLX4_PORT_TYPE_IB;
	} else if (!strcmp(buf, "eth\n")) {
		port_type = MLX4_PORT_TYPE_ETH;
	} else if (!strcmp(buf, "auto\n")) {
		port_type = MLX4_PORT_TYPE_AUTO;
	} else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	err = __set_port_type(info, port_type);

err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}
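
/*
 * Illustrative usage (the PCI address is an example; the per-port
 * attributes are registered elsewhere in the driver): the handlers
 * above back sysfs files that accept "ib", "eth" or "auto", e.g.:
 *
 *   cat /sys/bus/pci/devices/0000:03:00.0/mlx4_port1
 *   echo eth > /sys/bus/pci/devices/0000:03:00.0/mlx4_port1
 */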

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}
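
/*
 * Note: the IBTA encoding is logarithmic - IB_MTU_256 through
 * IB_MTU_4096 are the consecutive values 1..5, so for a valid
 * power-of-two MTU the lookup above is equivalent to ilog2(mtu) - 7
 * (e.g. 1024 -> 10 - 7 = 3 = IB_MTU_1024). The explicit switches are
 * kept for clarity and so that invalid sizes map to -1.
 */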

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is not a valid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);

	/* only single port vfs are allowed */
	if (bitmap_weight_and(slaves_port1.slaves, slaves_port2.slaves,
			      dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

static int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}

static int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}

static int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -EOPNOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides the boundary checks, cross mapping makes
		 * no sense and is therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
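
/*
 * Mapping semantics, by example: {port1 = 1, port2 = 2} is the identity
 * map, {2, 2} steers both virtual ports to physical port 2 (the
 * bonded/HA case), and a 0 in either slot keeps that port's current
 * mapping. The full swap {2, 1} is rejected above as cross mapping.
 */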

struct mlx4_bond {
	struct work_struct work;
	struct mlx4_dev *dev;
	int is_bonded;
	struct mlx4_port_map port_map;
};

static void mlx4_bond_work(struct work_struct *work)
{
	struct mlx4_bond *bond = container_of(work, struct mlx4_bond, work);
	int err = 0;

	if (bond->is_bonded) {
		if (!mlx4_is_bonded(bond->dev)) {
			err = mlx4_bond(bond->dev);
			if (err)
				mlx4_err(bond->dev, "Failed to bond device\n");
		}
		if (!err) {
			err = mlx4_port_map_set(bond->dev, &bond->port_map);
			if (err)
				mlx4_err(bond->dev,
					 "Failed to set port map [%d][%d]: %d\n",
					 bond->port_map.port1,
					 bond->port_map.port2, err);
		}
	} else if (mlx4_is_bonded(bond->dev)) {
		err = mlx4_unbond(bond->dev);
		if (err)
			mlx4_err(bond->dev, "Failed to unbond device\n");
	}
	put_device(&bond->dev->persist->pdev->dev);
	kfree(bond);
}

int mlx4_queue_bond_work(struct mlx4_dev *dev, int is_bonded, u8 v2p_p1,
			 u8 v2p_p2)
{
	struct mlx4_bond *bond;

	bond = kzalloc_obj(*bond, GFP_ATOMIC);
	if (!bond)
		return -ENOMEM;

	INIT_WORK(&bond->work, mlx4_bond_work);
	get_device(&dev->persist->pdev->dev);
	bond->dev = dev;
	bond->is_bonded = is_bonded;
	bond->port_map.port1 = v2p_p1;
	bond->port_map.port2 = v2p_p2;
	queue_work(mlx4_wq, &bond->work);
	return 0;
}
EXPORT_SYMBOL(mlx4_queue_bond_work);
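
/*
 * Design note: callers may be in atomic context (hence GFP_ATOMIC
 * above), while the actual bond/unbond path sleeps (it unregisters and
 * re-registers the device), so the request is deferred to mlx4_wq. The
 * get_device() reference taken here is dropped by mlx4_bond_work()
 * once the work item completes.
 */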

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}


	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
1796 if (err) {
1797 mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
1798 goto err_unmap_cq;
1799 }
1800
1801 /*
1802 * For flow steering device managed mode it is required to use
1803 * mlx4_init_icm_table. For B0 steering mode it's not strictly
1804 * required, but for simplicity just map the whole multicast
1805 * group table now. The table isn't very big and it's a lot
1806 * easier than trying to track ref counts.
1807 */
1808 err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
1809 init_hca->mc_base,
1810 mlx4_get_mgm_entry_size(dev),
1811 dev->caps.num_mgms + dev->caps.num_amgms,
1812 dev->caps.num_mgms + dev->caps.num_amgms,
1813 0, 0);
1814 if (err) {
1815 mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
1816 goto err_unmap_srq;
1817 }
1818
1819 return 0;
1820
1821 err_unmap_srq:
1822 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1823
1824 err_unmap_cq:
1825 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1826
1827 err_unmap_rdmarc:
1828 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1829
1830 err_unmap_altc:
1831 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1832
1833 err_unmap_auxc:
1834 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1835
1836 err_unmap_qp:
1837 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1838
1839 err_unmap_dmpt:
1840 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1841
1842 err_unmap_mtt:
1843 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1844
1845 err_unmap_eq:
1846 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1847
1848 err_unmap_cmpt:
1849 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1850 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1851 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1852 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1853
1854 err_unmap_aux:
1855 mlx4_UNMAP_ICM_AUX(dev);
1856
1857 err_free_aux:
1858 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1859
1860 return err;
1861 }
1862
1863 static void mlx4_free_icms(struct mlx4_dev *dev)
1864 {
1865 struct mlx4_priv *priv = mlx4_priv(dev);
1866
1867 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
1868 mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
1869 mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
1870 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
1871 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
1872 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
1873 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
1874 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
1875 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
1876 mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
1877 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
1878 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
1879 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
1880 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
1881
1882 mlx4_UNMAP_ICM_AUX(dev);
1883 mlx4_free_icm(dev, priv->fw.aux_icm, 0);
1884 }
1885
1886 static void mlx4_slave_exit(struct mlx4_dev *dev)
1887 {
1888 struct mlx4_priv *priv = mlx4_priv(dev);
1889
1890 mutex_lock(&priv->cmd.slave_cmd_mutex);
1891 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
1892 MLX4_COMM_TIME))
1893 mlx4_warn(dev, "Failed to close slave function\n");
1894 mutex_unlock(&priv->cmd.slave_cmd_mutex);
1895 }
1896
1897 static int map_bf_area(struct mlx4_dev *dev)
1898 {
1899 struct mlx4_priv *priv = mlx4_priv(dev);
1900 resource_size_t bf_start;
1901 resource_size_t bf_len;
1902 int err = 0;
1903
1904 if (!dev->caps.bf_reg_size)
1905 return -ENXIO;
1906
1907 bf_start = pci_resource_start(dev->persist->pdev, 2) +
1908 (dev->caps.num_uars << PAGE_SHIFT);
1909 bf_len = pci_resource_len(dev->persist->pdev, 2) -
1910 (dev->caps.num_uars << PAGE_SHIFT);
1911 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
1912 if (!priv->bf_mapping)
1913 err = -ENOMEM;
1914
1915 return err;
1916 }
1917
1918 static void unmap_bf_area(struct mlx4_dev *dev)
1919 {
1920 if (mlx4_priv(dev)->bf_mapping)
1921 io_mapping_free(mlx4_priv(dev)->bf_mapping);
1922 }
1923
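/* Read the 64-bit free-running hardware clock from two 32-bit MMIO
 * words.  Since the two halves can't be read atomically, re-read the
 * high word (up to 10 tries) until it is stable around the low-word
 * read, to avoid a torn value across wraparound.
 */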
1924 u64 mlx4_read_clock(struct mlx4_dev *dev)
1925 {
1926 u32 clockhi, clocklo, clockhi1;
1927 u64 cycles;
1928 int i;
1929 struct mlx4_priv *priv = mlx4_priv(dev);
1930
1931 for (i = 0; i < 10; i++) {
1932 clockhi = swab32(readl(priv->clock_mapping));
1933 clocklo = swab32(readl(priv->clock_mapping + 4));
1934 clockhi1 = swab32(readl(priv->clock_mapping));
1935 if (clockhi == clockhi1)
1936 break;
1937 }
1938
1939 cycles = (u64) clockhi << 32 | (u64) clocklo;
1940
1941 return cycles;
1942 }
1943 EXPORT_SYMBOL_GPL(mlx4_read_clock);
1944
1945
1946 static int map_internal_clock(struct mlx4_dev *dev)
1947 {
1948 struct mlx4_priv *priv = mlx4_priv(dev);
1949
1950 priv->clock_mapping =
1951 ioremap(pci_resource_start(dev->persist->pdev,
1952 priv->fw.clock_bar) +
1953 priv->fw.clock_offset, MLX4_CLOCK_SIZE);
1954
1955 if (!priv->clock_mapping)
1956 return -ENOMEM;
1957
1958 return 0;
1959 }
1960
1961 int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
1962 struct mlx4_clock_params *params)
1963 {
1964 struct mlx4_priv *priv = mlx4_priv(dev);
1965
1966 if (mlx4_is_slave(dev))
1967 return -EOPNOTSUPP;
1968
1969 if (!dev->caps.map_clock_to_user) {
1970 mlx4_dbg(dev, "Map clock to user is not supported.\n");
1971 return -EOPNOTSUPP;
1972 }
1973
1974 if (!params)
1975 return -EINVAL;
1976
1977 params->bar = priv->fw.clock_bar;
1978 params->offset = priv->fw.clock_offset;
1979 params->size = MLX4_CLOCK_SIZE;
1980
1981 return 0;
1982 }
1983 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params);
1984
1985 static void unmap_internal_clock(struct mlx4_dev *dev)
1986 {
1987 struct mlx4_priv *priv = mlx4_priv(dev);
1988
1989 if (priv->clock_mapping)
1990 iounmap(priv->clock_mapping);
1991 }
1992
1993 static void mlx4_close_hca(struct mlx4_dev *dev)
1994 {
1995 unmap_internal_clock(dev);
1996 unmap_bf_area(dev);
1997 if (mlx4_is_slave(dev))
1998 mlx4_slave_exit(dev);
1999 else {
2000 mlx4_CLOSE_HCA(dev, 0);
2001 mlx4_free_icms(dev);
2002 }
2003 }
2004
2005 static void mlx4_close_fw(struct mlx4_dev *dev)
2006 {
2007 if (!mlx4_is_slave(dev)) {
2008 mlx4_UNMAP_FA(dev);
2009 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
2010 }
2011 }
2012
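/* Poll the comm-channel flags until the PF clears the offline bit,
 * giving up after MLX4_COMM_OFFLINE_TIME_OUT msecs, or earlier if
 * device removal has been requested.
 */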
2013 static int mlx4_comm_check_offline(struct mlx4_dev *dev)
2014 {
2015 #define COMM_CHAN_OFFLINE_OFFSET 0x09
2016
2017 u32 comm_flags;
2018 u32 offline_bit;
2019 unsigned long end;
2020 struct mlx4_priv *priv = mlx4_priv(dev);
2021
2022 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies;
2023 while (time_before(jiffies, end)) {
2024 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm +
2025 MLX4_COMM_CHAN_FLAGS));
2026 offline_bit = (comm_flags &
2027 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET));
2028 if (!offline_bit)
2029 return 0;
2030
2031 /* If device removal has been requested,
2032 * do not continue retrying.
2033 */
2034 if (dev->persist->interface_state &
2035 MLX4_INTERFACE_STATE_NOWAIT)
2036 break;
2037
2038 /* There are cases, as part of the AER/Reset flow, where the PF
2039 * needs around 100 msec to load. We therefore sleep for 100 msec
2040 * to allow other tasks to make use of that CPU during this time
2041 * interval.
2042 */
2043 msleep(100);
2044 }
2045 mlx4_err(dev, "Communication channel is offline.\n");
2046 return -EIO;
2047 }
2048
2049 static void mlx4_reset_vf_support(struct mlx4_dev *dev)
2050 {
2051 #define COMM_CHAN_RST_OFFSET 0x1e
2052
2053 struct mlx4_priv *priv = mlx4_priv(dev);
2054 u32 comm_rst;
2055 u32 comm_caps;
2056
2057 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm +
2058 MLX4_COMM_CHAN_CAPS));
2059 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET));
2060
2061 if (comm_rst)
2062 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET;
2063 }
2064
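/* VF (slave) initialization handshake over the comm channel: reset the
 * slave function, verify that the command-interface revision matches
 * the master's, then post the VHCR DMA address to the PF 16 bits at a
 * time (VHCR0..VHCR2, then VHCR_EN with the low bits).
 */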
2065 static int mlx4_init_slave(struct mlx4_dev *dev)
2066 {
2067 struct mlx4_priv *priv = mlx4_priv(dev);
2068 u64 dma = (u64) priv->mfunc.vhcr_dma;
2069 int ret_from_reset = 0;
2070 u32 slave_read;
2071 u32 cmd_channel_ver;
2072
2073 if (atomic_read(&pf_loading)) {
2074 mlx4_warn(dev, "PF is not ready - Deferring probe\n");
2075 return -EPROBE_DEFER;
2076 }
2077
2078 mutex_lock(&priv->cmd.slave_cmd_mutex);
2079 priv->cmd.max_cmds = 1;
2080 if (mlx4_comm_check_offline(dev)) {
2081 mlx4_err(dev, "PF is not responsive, skipping initialization\n");
2082 goto err_offline;
2083 }
2084
2085 mlx4_reset_vf_support(dev);
2086 mlx4_warn(dev, "Sending reset\n");
2087 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
2088 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME);
2089 /* If we are in the middle of FLR, the slave will try
2090 * NUM_OF_RESET_RETRIES times before leaving. */
2091 if (ret_from_reset) {
2092 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
2093 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
2094 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2095 return -EPROBE_DEFER;
2096 } else
2097 goto err;
2098 }
2099
2100 /* check the driver version - the slave I/F revision
2101 * must match the master's */
2102 slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
2103 cmd_channel_ver = mlx4_comm_get_version();
2104
2105 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
2106 MLX4_COMM_GET_IF_REV(slave_read)) {
2107 mlx4_err(dev, "slave driver version is not supported by the master\n");
2108 goto err;
2109 }
2110
2111 mlx4_warn(dev, "Sending vhcr0\n");
2112 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
2113 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2114 goto err;
2115 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
2116 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2117 goto err;
2118 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
2119 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2120 goto err;
2121 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma,
2122 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME))
2123 goto err;
2124
2125 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2126 return 0;
2127
2128 err:
2129 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0);
2130 err_offline:
2131 mutex_unlock(&priv->cmd.slave_cmd_mutex);
2132 return -EIO;
2133 }
2134
2135 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev)
2136 {
2137 int i;
2138
2139 for (i = 1; i <= dev->caps.num_ports; i++) {
2140 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
2141 dev->caps.gid_table_len[i] =
2142 mlx4_get_slave_num_gids(dev, 0, i);
2143 else
2144 dev->caps.gid_table_len[i] = 1;
2145 dev->caps.pkey_table_len[i] =
2146 dev->phys_caps.pkey_phys_table_len[i] - 1;
2147 }
2148 }
2149
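/* Pick the smallest log2 MGM entry size that fits qp_per_entry QPs:
 * an entry of 2^i bytes holds (2^i / 16 - 2) 16-byte lines of 4 QPNs
 * each, after reserving two lines for the entry header.  Returns -1 if
 * even the largest supported entry size is too small.
 */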
2150 static int choose_log_fs_mgm_entry_size(int qp_per_entry)
2151 {
2152 int i;
2153
2154 for (i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE;
2155 i++) {
2156 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2))
2157 break;
2158 }
2159
2160 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1;
2161 }
2162
2163 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode)
2164 {
2165 switch (dmfs_high_steer_mode) {
2166 case MLX4_STEERING_DMFS_A0_DEFAULT:
2167 return "default performance";
2168
2169 case MLX4_STEERING_DMFS_A0_DYNAMIC:
2170 return "dynamic hybrid mode";
2171
2172 case MLX4_STEERING_DMFS_A0_STATIC:
2173 return "performance optimized for limited rule configuration (static)";
2174
2175 case MLX4_STEERING_DMFS_A0_DISABLE:
2176 return "disabled performance optimized steering";
2177
2178 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED:
2179 return "performance optimized steering not supported";
2180
2181 default:
2182 return "Unrecognized mode";
2183 }
2184 }
2185
2186 #define MLX4_DMFS_A0_STEERING (1UL << 2)
2187
2188 static void choose_steering_mode(struct mlx4_dev *dev,
2189 struct mlx4_dev_cap *dev_cap)
2190 {
2191 if (mlx4_log_num_mgm_entry_size <= 0) {
2192 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) {
2193 if (dev->caps.dmfs_high_steer_mode ==
2194 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2195 mlx4_err(dev, "DMFS high rate mode not supported\n");
2196 else
2197 dev->caps.dmfs_high_steer_mode =
2198 MLX4_STEERING_DMFS_A0_STATIC;
2199 }
2200 }
2201
2202 if (mlx4_log_num_mgm_entry_size <= 0 &&
2203 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN &&
2204 (!mlx4_is_mfunc(dev) ||
2205 (dev_cap->fs_max_num_qp_per_entry >=
2206 (dev->persist->num_vfs + 1))) &&
2207 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >=
2208 MLX4_MIN_MGM_LOG_ENTRY_SIZE) {
2209 dev->oper_log_mgm_entry_size =
2210 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry);
2211 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED;
2212 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
2213 dev->caps.fs_log_max_ucast_qp_range_size =
2214 dev_cap->fs_log_max_ucast_qp_range_size;
2215 } else {
2216 if (dev->caps.dmfs_high_steer_mode !=
2217 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2218 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE;
2219 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER &&
2220 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2221 dev->caps.steering_mode = MLX4_STEERING_MODE_B0;
2222 else {
2223 dev->caps.steering_mode = MLX4_STEERING_MODE_A0;
2224
2225 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
2226 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
2227 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
2228 }
2229 dev->oper_log_mgm_entry_size =
2230 mlx4_log_num_mgm_entry_size > 0 ?
2231 mlx4_log_num_mgm_entry_size :
2232 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
2233 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
2234 }
2235 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
2236 mlx4_steering_mode_str(dev->caps.steering_mode),
2237 dev->oper_log_mgm_entry_size,
2238 mlx4_log_num_mgm_entry_size);
2239 }
2240
2241 static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
2242 struct mlx4_dev_cap *dev_cap)
2243 {
2244 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
2245 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
2246 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
2247 else
2248 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
2249
2250 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode
2251 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
2252 }
2253
2254 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2255 {
2256 int i;
2257 struct mlx4_port_cap port_cap;
2258
2259 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED)
2260 return -EINVAL;
2261
2262 for (i = 1; i <= dev->caps.num_ports; i++) {
2263 if (mlx4_dev_port(dev, i, &port_cap)) {
2264 mlx4_err(dev,
2265 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
2266 } else if ((dev->caps.dmfs_high_steer_mode !=
2267 MLX4_STEERING_DMFS_A0_DEFAULT) &&
2268 (port_cap.dmfs_optimized_state ==
2269 !!(dev->caps.dmfs_high_steer_mode ==
2270 MLX4_STEERING_DMFS_A0_DISABLE))) {
2271 mlx4_err(dev,
2272 "DMFS high rate steer mode differ, driver requested %s but %s in FW.\n",
2273 dmfs_high_rate_steering_mode_str(
2274 dev->caps.dmfs_high_steer_mode),
2275 (port_cap.dmfs_optimized_state ?
2276 "enabled" : "disabled"));
2277 }
2278 }
2279
2280 return 0;
2281 }
2282
2283 static int mlx4_init_fw(struct mlx4_dev *dev)
2284 {
2285 struct mlx4_mod_stat_cfg mlx4_cfg;
2286 int err = 0;
2287
2288 if (!mlx4_is_slave(dev)) {
2289 err = mlx4_QUERY_FW(dev);
2290 if (err) {
2291 if (err == -EACCES)
2292 mlx4_info(dev, "non-primary physical function, skipping\n");
2293 else
2294 mlx4_err(dev, "QUERY_FW command failed, aborting\n");
2295 return err;
2296 }
2297
2298 err = mlx4_load_fw(dev);
2299 if (err) {
2300 mlx4_err(dev, "Failed to start FW, aborting\n");
2301 return err;
2302 }
2303
2304 mlx4_cfg.log_pg_sz_m = 1;
2305 mlx4_cfg.log_pg_sz = 0;
2306 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
2307 if (err)
2308 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
2309 }
2310
2311 return err;
2312 }
2313
2314 static int mlx4_init_hca(struct mlx4_dev *dev)
2315 {
2316 struct mlx4_priv *priv = mlx4_priv(dev);
2317 struct mlx4_init_hca_param *init_hca = NULL;
2318 struct mlx4_dev_cap *dev_cap = NULL;
2319 struct mlx4_adapter adapter;
2320 struct mlx4_profile profile;
2321 u64 icm_size;
2322 struct mlx4_config_dev_params params;
2323 int err;
2324
2325 if (!mlx4_is_slave(dev)) {
2326 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
2327 init_hca = kzalloc(sizeof(*init_hca), GFP_KERNEL);
2328
2329 if (!dev_cap || !init_hca) {
2330 err = -ENOMEM;
2331 goto out_free;
2332 }
2333
2334 err = mlx4_dev_cap(dev, dev_cap);
2335 if (err) {
2336 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
2337 goto out_free;
2338 }
2339
2340 choose_steering_mode(dev, dev_cap);
2341 choose_tunnel_offload_mode(dev, dev_cap);
2342
2343 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC &&
2344 mlx4_is_master(dev))
2345 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC;
2346
2347 err = mlx4_get_phys_port_id(dev);
2348 if (err)
2349 mlx4_err(dev, "Fail to get physical port id\n");
2350
2351 if (mlx4_is_master(dev))
2352 mlx4_parav_master_pf_caps(dev);
2353
2354 if (mlx4_low_memory_profile()) {
2355 mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
2356 profile = low_mem_profile;
2357 } else {
2358 profile = default_profile;
2359 }
2360 if (dev->caps.steering_mode ==
2361 MLX4_STEERING_MODE_DEVICE_MANAGED)
2362 profile.num_mcg = MLX4_FS_NUM_MCG;
2363
2364 icm_size = mlx4_make_profile(dev, &profile, dev_cap,
2365 init_hca);
2366 if ((long long) icm_size < 0) {
2367 err = icm_size;
2368 goto out_free;
2369 }
2370
2371 if (enable_4k_uar || !dev->persist->num_vfs) {
2372 init_hca->log_uar_sz = ilog2(dev->caps.num_uars) +
2373 PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
2374 init_hca->uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2375 } else {
2376 init_hca->log_uar_sz = ilog2(dev->caps.num_uars);
2377 init_hca->uar_page_sz = PAGE_SHIFT - 12;
2378 }
2379
2380 init_hca->mw_enabled = 0;
2381 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2382 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
2383 init_hca->mw_enabled = INIT_HCA_TPT_MW_ENABLE;
2384
2385 err = mlx4_init_icm(dev, dev_cap, init_hca, icm_size);
2386 if (err)
2387 goto out_free;
2388
2389 err = mlx4_INIT_HCA(dev, init_hca);
2390 if (err) {
2391 mlx4_err(dev, "INIT_HCA command failed, aborting\n");
2392 goto err_free_icm;
2393 }
2394
2395 if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
2396 err = mlx4_query_func(dev, dev_cap);
2397 if (err < 0) {
2398 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
2399 goto err_close;
2400 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
2401 dev->caps.num_eqs = dev_cap->max_eqs;
2402 dev->caps.reserved_eqs = dev_cap->reserved_eqs;
2403 dev->caps.reserved_uars = dev_cap->reserved_uars;
2404 }
2405 }
2406
2407 /*
2408 * If TS is supported by FW
2409 * read HCA frequency by QUERY_HCA command
2410 */
2411 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) {
2412 err = mlx4_QUERY_HCA(dev, init_hca);
2413 if (err) {
2414 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
2415 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2416 } else {
2417 dev->caps.hca_core_clock =
2418 init_hca->hca_core_clock;
2419 }
2420
2421 /* In case we got HCA frequency 0 - disable timestamping
2422 * to avoid dividing by zero
2423 */
2424 if (!dev->caps.hca_core_clock) {
2425 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2426 mlx4_err(dev,
2427 "HCA frequency is 0 - timestamping is not supported\n");
2428 } else if (map_internal_clock(dev)) {
2429 /*
2430 * Map the internal clock;
2431 * in case of failure, disable timestamping
2432 */
2433 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
2434 mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
2435 }
2436 }
2437
2438 if (dev->caps.dmfs_high_steer_mode !=
2439 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) {
2440 if (mlx4_validate_optimized_steering(dev))
2441 mlx4_warn(dev, "Optimized steering validation failed\n");
2442
2443 if (dev->caps.dmfs_high_steer_mode ==
2444 MLX4_STEERING_DMFS_A0_DISABLE) {
2445 dev->caps.dmfs_high_rate_qpn_base =
2446 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
2447 dev->caps.dmfs_high_rate_qpn_range =
2448 MLX4_A0_STEERING_TABLE_SIZE;
2449 }
2450
2451 mlx4_info(dev, "DMFS high rate steer mode is: %s\n",
2452 dmfs_high_rate_steering_mode_str(
2453 dev->caps.dmfs_high_steer_mode));
2454 }
2455 } else {
2456 err = mlx4_init_slave(dev);
2457 if (err) {
2458 if (err != -EPROBE_DEFER)
2459 mlx4_err(dev, "Failed to initialize slave\n");
2460 return err;
2461 }
2462
2463 err = mlx4_slave_cap(dev);
2464 if (err) {
2465 mlx4_err(dev, "Failed to obtain slave caps\n");
2466 goto err_close;
2467 }
2468 }
2469
2470 if (map_bf_area(dev))
2471 mlx4_dbg(dev, "Failed to map blue flame area\n");
2472
2473 /* Only the master sets the ports; all the rest get it from it. */
2474 if (!mlx4_is_slave(dev))
2475 mlx4_set_port_mask(dev);
2476
2477 err = mlx4_QUERY_ADAPTER(dev, &adapter);
2478 if (err) {
2479 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
2480 goto unmap_bf;
2481 }
2482
2483 /* Query CONFIG_DEV parameters */
2484 err = mlx4_config_dev_retrieval(dev, &params);
2485 if (err && err != -EOPNOTSUPP) {
2486 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n");
2487 } else if (!err) {
2488 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1;
2489 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2;
2490 }
2491 priv->eq_table.inta_pin = adapter.inta_pin;
2492 memcpy(dev->board_id, adapter.board_id, sizeof(dev->board_id));
2493
2494 err = 0;
2495 goto out_free;
2496
2497 unmap_bf:
2498 unmap_internal_clock(dev);
2499 unmap_bf_area(dev);
2500
2501 if (mlx4_is_slave(dev))
2502 mlx4_slave_destroy_special_qp_cap(dev);
2503
2504 err_close:
2505 if (mlx4_is_slave(dev))
2506 mlx4_slave_exit(dev);
2507 else
2508 mlx4_CLOSE_HCA(dev, 0);
2509
2510 err_free_icm:
2511 if (!mlx4_is_slave(dev))
2512 mlx4_free_icms(dev);
2513
2514 out_free:
2515 kfree(dev_cap);
2516 kfree(init_hca);
2517
2518 return err;
2519 }
2520
2521 static int mlx4_init_counters_table(struct mlx4_dev *dev)
2522 {
2523 struct mlx4_priv *priv = mlx4_priv(dev);
2524 int nent_pow2;
2525
2526 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2527 return -ENOENT;
2528
2529 if (!dev->caps.max_counters)
2530 return -ENOSPC;
2531
2532 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters);
2533 /* reserve last counter index for sink counter */
2534 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2,
2535 nent_pow2 - 1, 0,
2536 nent_pow2 - dev->caps.max_counters + 1);
2537 }
2538
2539 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
2540 {
2541 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2542 return;
2543
2544 if (!dev->caps.max_counters)
2545 return;
2546
2547 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
2548 }
2549
2550 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev)
2551 {
2552 struct mlx4_priv *priv = mlx4_priv(dev);
2553 int port;
2554
2555 for (port = 0; port < dev->caps.num_ports; port++)
2556 if (priv->def_counter[port] != -1)
2557 mlx4_counter_free(dev, priv->def_counter[port]);
2558 }
2559
2560 static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2561 {
2562 struct mlx4_priv *priv = mlx4_priv(dev);
2563 int port, err = 0;
2564 u32 idx;
2565
2566 for (port = 0; port < dev->caps.num_ports; port++)
2567 priv->def_counter[port] = -1;
2568
2569 for (port = 0; port < dev->caps.num_ports; port++) {
2570 err = mlx4_counter_alloc(dev, &idx, MLX4_RES_USAGE_DRIVER);
2571
2572 if (!err || err == -ENOSPC) {
2573 priv->def_counter[port] = idx;
2574 err = 0;
2575 } else if (err == -ENOENT) {
2576 err = 0;
2577 continue;
2578 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2579 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2580 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2581 MLX4_SINK_COUNTER_INDEX(dev));
2582 err = 0;
2583 } else {
2584 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2585 __func__, port + 1, err);
2586 mlx4_cleanup_default_counters(dev);
2587 return err;
2588 }
2589
2590 mlx4_dbg(dev, "%s: default counter index %d for port %d\n",
2591 __func__, priv->def_counter[port], port + 1);
2592 }
2593
2594 return err;
2595 }
2596
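/* Reserve a counter index from the bitmap.  If the bitmap is
 * exhausted, fall back to the shared "sink" counter index and return
 * -ENOSPC so the caller knows it did not get a private counter.
 */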
2597 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
2598 {
2599 struct mlx4_priv *priv = mlx4_priv(dev);
2600
2601 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2602 return -ENOENT;
2603
2604 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
2605 if (*idx == -1) {
2606 *idx = MLX4_SINK_COUNTER_INDEX(dev);
2607 return -ENOSPC;
2608 }
2609
2610 return 0;
2611 }
2612
2613 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx, u8 usage)
2614 {
2615 u32 in_modifier = RES_COUNTER | (((u32)usage & 3) << 30);
2616 u64 out_param;
2617 int err;
2618
2619 if (mlx4_is_mfunc(dev)) {
2620 err = mlx4_cmd_imm(dev, 0, &out_param, in_modifier,
2621 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES,
2622 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
2623 if (!err)
2624 *idx = get_param_l(&out_param);
2625 if (WARN_ON(err == -ENOSPC))
2626 err = -EINVAL;
2627 return err;
2628 }
2629 return __mlx4_counter_alloc(dev, idx);
2630 }
2631 EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
2632
2633 static int __mlx4_clear_if_stat(struct mlx4_dev *dev,
2634 u8 counter_index)
2635 {
2636 struct mlx4_cmd_mailbox *if_stat_mailbox;
2637 int err;
2638 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET;
2639
2640 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev);
2641 if (IS_ERR(if_stat_mailbox))
2642 return PTR_ERR(if_stat_mailbox);
2643
2644 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0,
2645 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
2646 MLX4_CMD_NATIVE);
2647
2648 mlx4_free_cmd_mailbox(dev, if_stat_mailbox);
2649 return err;
2650 }
2651
2652 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2653 {
2654 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
2655 return;
2656
2657 if (idx == MLX4_SINK_COUNTER_INDEX(dev))
2658 return;
2659
2660 __mlx4_clear_if_stat(dev, idx);
2661
2662 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR);
2663 return;
2664 }
2665
2666 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
2667 {
2668 u64 in_param = 0;
2669
2670 if (mlx4_is_mfunc(dev)) {
2671 set_param_l(&in_param, idx);
2672 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE,
2673 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
2674 MLX4_CMD_WRAPPED);
2675 return;
2676 }
2677 __mlx4_counter_free(dev, idx);
2678 }
2679 EXPORT_SYMBOL_GPL(mlx4_counter_free);
2680
2681 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port)
2682 {
2683 struct mlx4_priv *priv = mlx4_priv(dev);
2684
2685 return priv->def_counter[port - 1];
2686 }
2687 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index);
2688
2689 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int port)
2690 {
2691 struct mlx4_priv *priv = mlx4_priv(dev);
2692
2693 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2694 }
2695 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid);
2696
2697 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port)
2698 {
2699 struct mlx4_priv *priv = mlx4_priv(dev);
2700
2701 return priv->mfunc.master.vf_admin[entry].vport[port].guid;
2702 }
2703 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid);
2704
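/* Generate a random administrative GUID for a VF port.  Entry 0 is the
 * HW GUID and is left untouched.  In the top octet of the GUID, the
 * EUI-64 group bit is cleared and the locally-administered bit is set,
 * so the result is a unicast, locally assigned identifier.
 */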
2705 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port)
2706 {
2707 struct mlx4_priv *priv = mlx4_priv(dev);
2708 __be64 guid;
2709
2710 /* hw GUID */
2711 if (entry == 0)
2712 return;
2713
2714 get_random_bytes((char *)&guid, sizeof(guid));
2715 guid &= ~(cpu_to_be64(1ULL << 56));
2716 guid |= cpu_to_be64(1ULL << 57);
2717 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid;
2718 }
2719
2720 static int mlx4_setup_hca(struct mlx4_dev *dev)
2721 {
2722 struct mlx4_priv *priv = mlx4_priv(dev);
2723 int err;
2724 int port;
2725 __be32 ib_port_default_caps;
2726
2727 err = mlx4_init_uar_table(dev);
2728 if (err) {
2729 mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
2730 return err;
2731 }
2732
2733 err = mlx4_uar_alloc(dev, &priv->driver_uar);
2734 if (err) {
2735 mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
2736 goto err_uar_table_free;
2737 }
2738
2739 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
2740 if (!priv->kar) {
2741 mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
2742 err = -ENOMEM;
2743 goto err_uar_free;
2744 }
2745
2746 err = mlx4_init_pd_table(dev);
2747 if (err) {
2748 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
2749 goto err_kar_unmap;
2750 }
2751
2752 err = mlx4_init_xrcd_table(dev);
2753 if (err) {
2754 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
2755 goto err_pd_table_free;
2756 }
2757
2758 err = mlx4_init_mr_table(dev);
2759 if (err) {
2760 mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
2761 goto err_xrcd_table_free;
2762 }
2763
2764 if (!mlx4_is_slave(dev)) {
2765 err = mlx4_init_mcg_table(dev);
2766 if (err) {
2767 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
2768 goto err_mr_table_free;
2769 }
2770 err = mlx4_config_mad_demux(dev);
2771 if (err) {
2772 mlx4_err(dev, "Failed in config_mad_demux, aborting\n");
2773 goto err_mcg_table_free;
2774 }
2775 }
2776
2777 err = mlx4_init_eq_table(dev);
2778 if (err) {
2779 mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
2780 goto err_mcg_table_free;
2781 }
2782
2783 err = mlx4_cmd_use_events(dev);
2784 if (err) {
2785 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
2786 goto err_eq_table_free;
2787 }
2788
2789 err = mlx4_NOP(dev);
2790 if (err) {
2791 if (dev->flags & MLX4_FLAG_MSI_X) {
2792 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
2793 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2794 mlx4_warn(dev, "Trying again without MSI-X\n");
2795 } else {
2796 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
2797 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
2798 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
2799 }
2800
2801 goto err_cmd_poll;
2802 }
2803
2804 mlx4_dbg(dev, "NOP command IRQ test passed\n");
2805
2806 err = mlx4_init_cq_table(dev);
2807 if (err) {
2808 mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
2809 goto err_cmd_poll;
2810 }
2811
2812 err = mlx4_init_srq_table(dev);
2813 if (err) {
2814 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
2815 goto err_cq_table_free;
2816 }
2817
2818 err = mlx4_init_qp_table(dev);
2819 if (err) {
2820 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
2821 goto err_srq_table_free;
2822 }
2823
2824 if (!mlx4_is_slave(dev)) {
2825 err = mlx4_init_counters_table(dev);
2826 if (err && err != -ENOENT) {
2827 mlx4_err(dev, "Failed to initialize counters table, aborting\n");
2828 goto err_qp_table_free;
2829 }
2830 }
2831
2832 err = mlx4_allocate_default_counters(dev);
2833 if (err) {
2834 mlx4_err(dev, "Failed to allocate default counters, aborting\n");
2835 goto err_counters_table_free;
2836 }
2837
2838 if (!mlx4_is_slave(dev)) {
2839 for (port = 1; port <= dev->caps.num_ports; port++) {
2840 ib_port_default_caps = 0;
2841 err = mlx4_get_port_ib_caps(dev, port,
2842 &ib_port_default_caps);
2843 if (err)
2844 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
2845 port, err);
2846 dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
2847
2848 /* initialize per-slave default ib port capabilities */
2849 if (mlx4_is_master(dev)) {
2850 int i;
2851 for (i = 0; i < dev->num_slaves; i++) {
2852 if (i == mlx4_master_func_num(dev))
2853 continue;
2854 priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
2855 ib_port_default_caps;
2856 }
2857 }
2858
2859 if (mlx4_is_mfunc(dev))
2860 dev->caps.port_ib_mtu[port] = IB_MTU_2048;
2861 else
2862 dev->caps.port_ib_mtu[port] = IB_MTU_4096;
2863
2864 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ?
2865 dev->caps.pkey_table_len[port] : -1);
2866 if (err) {
2867 mlx4_err(dev, "Failed to set port %d, aborting\n",
2868 port);
2869 goto err_default_counters_free;
2870 }
2871 }
2872 }
2873
2874 return 0;
2875
2876 err_default_counters_free:
2877 mlx4_cleanup_default_counters(dev);
2878
2879 err_counters_table_free:
2880 if (!mlx4_is_slave(dev))
2881 mlx4_cleanup_counters_table(dev);
2882
2883 err_qp_table_free:
2884 mlx4_cleanup_qp_table(dev);
2885
2886 err_srq_table_free:
2887 mlx4_cleanup_srq_table(dev);
2888
2889 err_cq_table_free:
2890 mlx4_cleanup_cq_table(dev);
2891
2892 err_cmd_poll:
2893 mlx4_cmd_use_polling(dev);
2894
2895 err_eq_table_free:
2896 mlx4_cleanup_eq_table(dev);
2897
2898 err_mcg_table_free:
2899 if (!mlx4_is_slave(dev))
2900 mlx4_cleanup_mcg_table(dev);
2901
2902 err_mr_table_free:
2903 mlx4_cleanup_mr_table(dev);
2904
2905 err_xrcd_table_free:
2906 mlx4_cleanup_xrcd_table(dev);
2907
2908 err_pd_table_free:
2909 mlx4_cleanup_pd_table(dev);
2910
2911 err_kar_unmap:
2912 iounmap(priv->kar);
2913
2914 err_uar_free:
2915 mlx4_uar_free(dev, &priv->driver_uar);
2916
2917 err_uar_table_free:
2918 mlx4_cleanup_uar_table(dev);
2919 return err;
2920 }
2921
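/* Build the affinity-hint cpumask for a completion EQ: map the EQ to a
 * CPU by subtracting from eqn the EQs assigned to lower-numbered ports
 * (and the async EQ slot).  A negative result means the EQs are shared
 * between ports and the hint was already set via port 1.
 */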
2922 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
2923 {
2924 int requested_cpu = 0;
2925 struct mlx4_priv *priv = mlx4_priv(dev);
2926 struct mlx4_eq *eq;
2927 int off = 0;
2928 int i;
2929
2930 if (eqn > dev->caps.num_comp_vectors)
2931 return -EINVAL;
2932
2933 for (i = 1; i < port; i++)
2934 off += mlx4_get_eqs_per_port(dev, i);
2935
2936 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
2937
2938 /* Meaning EQs are shared, and this call comes from the second port */
2939 if (requested_cpu < 0)
2940 return 0;
2941
2942 eq = &priv->eq_table.eq[eqn];
2943
2944 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
2945 return -ENOMEM;
2946
2947 cpumask_set_cpu(requested_cpu, eq->affinity_mask);
2948
2949 return 0;
2950 }
2951
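/* Try to enable MSI-X with one vector per online CPU per port plus one
 * for the async EQ, clamped by the EQs the device actually has (and by
 * the msi_x module parameter).  Falls back to a single shared vector on
 * the legacy PCI IRQ when MSI-X is unavailable.
 */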
2952 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
2953 {
2954 struct mlx4_priv *priv = mlx4_priv(dev);
2955 struct msix_entry *entries;
2956 int i;
2957 int port = 0;
2958
2959 if (msi_x) {
2960 int nreq = min3(dev->caps.num_ports *
2961 (int)num_online_cpus() + 1,
2962 dev->caps.num_eqs - dev->caps.reserved_eqs,
2963 MAX_MSIX);
2964
2965 if (msi_x > 1)
2966 nreq = min_t(int, nreq, msi_x);
2967
2968 entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
2969 if (!entries)
2970 goto no_msi;
2971
2972 for (i = 0; i < nreq; ++i)
2973 entries[i].entry = i;
2974
2975 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
2976 nreq);
2977
2978 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
2979 kfree(entries);
2980 goto no_msi;
2981 }
2982 /* 1 is reserved for events (asynchronous EQ) */
2983 dev->caps.num_comp_vectors = nreq - 1;
2984
2985 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
2986 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
2987 dev->caps.num_ports);
2988
2989 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
2990 if (i == MLX4_EQ_ASYNC)
2991 continue;
2992
2993 priv->eq_table.eq[i].irq =
2994 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
2995
2996 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
2997 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
2998 dev->caps.num_ports);
2999 /* We don't set affinity hint when there
3000 * aren't enough EQs
3001 */
3002 } else {
3003 set_bit(port,
3004 priv->eq_table.eq[i].actv_ports.ports);
3005 if (mlx4_init_affinity_hint(dev, port + 1, i))
3006 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
3007 i);
3008 }
3009 /* We divide the EQs evenly between the two ports.
3010 * (dev->caps.num_comp_vectors / dev->caps.num_ports)
3011 * refers to the number of EQs per port
3012 * (i.e. eqs_per_port). Theoretically, we would like to
3013 * write something like (i + 1) % eqs_per_port == 0.
3014 * However, since there's an asynchronous EQ, we have
3015 * to skip over it by comparing this condition to
3016 * !!((i + 1) > MLX4_EQ_ASYNC).
3017 */
3018 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
3019 ((i + 1) %
3020 (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
3021 !!((i + 1) > MLX4_EQ_ASYNC))
3022 /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
3023 * everything is shared anyway.
3024 */
3025 port++;
3026 }
3027
3028 dev->flags |= MLX4_FLAG_MSI_X;
3029
3030 kfree(entries);
3031 return;
3032 }
3033
3034 no_msi:
3035 dev->caps.num_comp_vectors = 1;
3036
3037 BUG_ON(MLX4_EQ_ASYNC >= 2);
3038 for (i = 0; i < 2; ++i) {
3039 priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
3040 if (i != MLX4_EQ_ASYNC) {
3041 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
3042 dev->caps.num_ports);
3043 }
3044 }
3045 }
3046
3047 static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
3048 enum devlink_port_type port_type)
3049 {
3050 struct mlx4_port_info *info = container_of(devlink_port,
3051 struct mlx4_port_info,
3052 devlink_port);
3053 enum mlx4_port_type mlx4_port_type;
3054
3055 switch (port_type) {
3056 case DEVLINK_PORT_TYPE_AUTO:
3057 mlx4_port_type = MLX4_PORT_TYPE_AUTO;
3058 break;
3059 case DEVLINK_PORT_TYPE_ETH:
3060 mlx4_port_type = MLX4_PORT_TYPE_ETH;
3061 break;
3062 case DEVLINK_PORT_TYPE_IB:
3063 mlx4_port_type = MLX4_PORT_TYPE_IB;
3064 break;
3065 default:
3066 return -EOPNOTSUPP;
3067 }
3068
3069 return __set_port_type(info, mlx4_port_type);
3070 }
3071
3072 static const struct devlink_port_ops mlx4_devlink_port_ops = {
3073 .port_type_set = mlx4_devlink_port_type_set,
3074 };
3075
3076 static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
3077 {
3078 struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
3079 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
3080 int err;
3081
3082 err = devl_port_register_with_ops(devlink, &info->devlink_port, port,
3083 &mlx4_devlink_port_ops);
3084 if (err)
3085 return err;
3086
3087 /* Ethernet and IB drivers will normally set the port type,
3088 * but if they are not built set the type now to prevent
3089 * devlink_port_type_warn() from firing.
3090 */
3091 if (!IS_ENABLED(CONFIG_MLX4_EN) &&
3092 dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
3093 devlink_port_type_eth_set(&info->devlink_port);
3094 else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
3095 dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
3096 devlink_port_type_ib_set(&info->devlink_port, NULL);
3097
3098 info->dev = dev;
3099 info->port = port;
3100 if (!mlx4_is_slave(dev)) {
3101 mlx4_init_mac_table(dev, &info->mac_table);
3102 mlx4_init_vlan_table(dev, &info->vlan_table);
3103 mlx4_init_roce_gid_table(dev, &info->gid_table);
3104 info->base_qpn = mlx4_get_base_qpn(dev, port);
3105 }
3106
3107 sprintf(info->dev_name, "mlx4_port%d", port);
3108 info->port_attr.attr.name = info->dev_name;
3109 if (mlx4_is_mfunc(dev)) {
3110 info->port_attr.attr.mode = 0444;
3111 } else {
3112 info->port_attr.attr.mode = 0644;
3113 info->port_attr.store = set_port_type;
3114 }
3115 info->port_attr.show = show_port_type;
3116 sysfs_attr_init(&info->port_attr.attr);
3117
3118 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr);
3119 if (err) {
3120 mlx4_err(dev, "Failed to create file for port %d\n", port);
3121 devlink_port_type_clear(&info->devlink_port);
3122 devl_port_unregister(&info->devlink_port);
3123 info->port = -1;
3124 return err;
3125 }
3126
3127 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port);
3128 info->port_mtu_attr.attr.name = info->dev_mtu_name;
3129 if (mlx4_is_mfunc(dev)) {
3130 info->port_mtu_attr.attr.mode = 0444;
3131 } else {
3132 info->port_mtu_attr.attr.mode = 0644;
3133 info->port_mtu_attr.store = set_port_ib_mtu;
3134 }
3135 info->port_mtu_attr.show = show_port_ib_mtu;
3136 sysfs_attr_init(&info->port_mtu_attr.attr);
3137
3138 err = device_create_file(&dev->persist->pdev->dev,
3139 &info->port_mtu_attr);
3140 if (err) {
3141 mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
3142 device_remove_file(&info->dev->persist->pdev->dev,
3143 &info->port_attr);
3144 devlink_port_type_clear(&info->devlink_port);
3145 devl_port_unregister(&info->devlink_port);
3146 info->port = -1;
3147 return err;
3148 }
3149
3150 return 0;
3151 }
3152
3153 static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
3154 {
3155 if (info->port < 0)
3156 return;
3157
3158 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
3159 device_remove_file(&info->dev->persist->pdev->dev,
3160 &info->port_mtu_attr);
3161 devlink_port_type_clear(&info->devlink_port);
3162 devl_port_unregister(&info->devlink_port);
3163
3164 #ifdef CONFIG_RFS_ACCEL
3165 free_irq_cpu_rmap(info->rmap);
3166 info->rmap = NULL;
3167 #endif
3168 }
3169
3170 static int mlx4_init_steering(struct mlx4_dev *dev)
3171 {
3172 struct mlx4_priv *priv = mlx4_priv(dev);
3173 int num_entries = dev->caps.num_ports;
3174 int i, j;
3175
3176 priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer), GFP_KERNEL);
3177 if (!priv->steer)
3178 return -ENOMEM;
3179
3180 for (i = 0; i < num_entries; i++)
3181 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3182 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
3183 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
3184 }
3185 return 0;
3186 }
3187
3188 static void mlx4_clear_steering(struct mlx4_dev *dev)
3189 {
3190 struct mlx4_priv *priv = mlx4_priv(dev);
3191 struct mlx4_steer_index *entry, *tmp_entry;
3192 struct mlx4_promisc_qp *pqp, *tmp_pqp;
3193 int num_entries = dev->caps.num_ports;
3194 int i, j;
3195
3196 for (i = 0; i < num_entries; i++) {
3197 for (j = 0; j < MLX4_NUM_STEERS; j++) {
3198 list_for_each_entry_safe(pqp, tmp_pqp,
3199 &priv->steer[i].promisc_qps[j],
3200 list) {
3201 list_del(&pqp->list);
3202 kfree(pqp);
3203 }
3204 list_for_each_entry_safe(entry, tmp_entry,
3205 &priv->steer[i].steer_entries[j],
3206 list) {
3207 list_del(&entry->list);
3208 list_for_each_entry_safe(pqp, tmp_pqp,
3209 &entry->duplicates,
3210 list) {
3211 list_del(&pqp->list);
3212 kfree(pqp);
3213 }
3214 kfree(entry);
3215 }
3216 }
3217 }
3218 kfree(priv->steer);
3219 }
3220
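/* Flatten a PCI devfn into a single function number:
 * slot * 8 + function (devfn encodes 8 functions per slot).
 */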
3221 static int extended_func_num(struct pci_dev *pdev)
3222 {
3223 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
3224 }
3225
3226 #define MLX4_OWNER_BASE 0x8069c
3227 #define MLX4_OWNER_SIZE 4
3228
3229 static int mlx4_get_ownership(struct mlx4_dev *dev)
3230 {
3231 void __iomem *owner;
3232 u32 ret;
3233
3234 if (pci_channel_offline(dev->persist->pdev))
3235 return -EIO;
3236
3237 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3238 MLX4_OWNER_BASE,
3239 MLX4_OWNER_SIZE);
3240 if (!owner) {
3241 mlx4_err(dev, "Failed to obtain ownership bit\n");
3242 return -ENOMEM;
3243 }
3244
3245 ret = readl(owner);
3246 iounmap(owner);
3247 return (int) !!ret;
3248 }
3249
3250 static void mlx4_free_ownership(struct mlx4_dev *dev)
3251 {
3252 void __iomem *owner;
3253
3254 if (pci_channel_offline(dev->persist->pdev))
3255 return;
3256
3257 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) +
3258 MLX4_OWNER_BASE,
3259 MLX4_OWNER_SIZE);
3260 if (!owner) {
3261 mlx4_err(dev, "Failed to obtain ownership bit\n");
3262 return;
3263 }
3264 writel(0, owner);
3265 msleep(1000);
3266 iounmap(owner);
3267 }
3268
3269 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\
3270 !!((flags) & MLX4_FLAG_MASTER))
3271
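/* Enable SR-IOV and return the updated device flags.  In the reset
 * flow only the dev_vfs array is reallocated; otherwise the VFs are
 * enabled in firmware via pci_enable_sriov().  On failure, the
 * MLX4_FLAG_MASTER flag is stripped from the returned flags.
 */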
3272 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev,
3273 u8 total_vfs, int existing_vfs, int reset_flow)
3274 {
3275 u64 dev_flags = dev->flags;
3276 int err = 0;
3277 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev),
3278 MLX4_MAX_NUM_VF);
3279
3280 if (reset_flow) {
3281 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
3282 if (!dev->dev_vfs)
3283 goto free_mem;
3284 return dev_flags;
3285 }
3286
3287 atomic_inc(&pf_loading);
3288 if (dev->flags & MLX4_FLAG_SRIOV) {
3289 if (existing_vfs != total_vfs) {
3290 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n",
3291 existing_vfs, total_vfs);
3292 total_vfs = existing_vfs;
3293 }
3294 }
3295
3296 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), GFP_KERNEL);
3297 if (!dev->dev_vfs) {
3298 mlx4_err(dev, "Failed to allocate memory for VFs\n");
3299 goto disable_sriov;
3300 }
3301
3302 if (!(dev->flags & MLX4_FLAG_SRIOV)) {
3303 if (total_vfs > fw_enabled_sriov_vfs) {
3304 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). Continuing without SR_IOV\n",
3305 total_vfs, fw_enabled_sriov_vfs);
3306 err = -ENOMEM;
3307 goto disable_sriov;
3308 }
3309 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs);
3310 err = pci_enable_sriov(pdev, total_vfs);
3311 }
3312 if (err) {
3313 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
3314 err);
3315 goto disable_sriov;
3316 } else {
3317 mlx4_warn(dev, "Running in master mode\n");
3318 dev_flags |= MLX4_FLAG_SRIOV |
3319 MLX4_FLAG_MASTER;
3320 dev_flags &= ~MLX4_FLAG_SLAVE;
3321 dev->persist->num_vfs = total_vfs;
3322 }
3323 return dev_flags;
3324
3325 disable_sriov:
3326 atomic_dec(&pf_loading);
3327 free_mem:
3328 dev->persist->num_vfs = 0;
3329 kfree(dev->dev_vfs);
3330 dev->dev_vfs = NULL;
3331 return dev_flags & ~MLX4_FLAG_MASTER;
3332 }
3333
3334 enum {
3335 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1,
3336 };
3337
3338 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
3339 int *nvfs)
3340 {
3341 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2];
3342 /* Checking for 64 VFs as a limitation of CX2 */
3343 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) &&
3344 requested_vfs >= 64) {
3345 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n",
3346 requested_vfs);
3347 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64;
3348 }
3349 return 0;
3350 }
3351
3352 static int mlx4_pci_enable_device(struct mlx4_dev *dev)
3353 {
3354 struct pci_dev *pdev = dev->persist->pdev;
3355 int err = 0;
3356
3357 mutex_lock(&dev->persist->pci_status_mutex);
3358 if (dev->persist->pci_status == MLX4_PCI_STATUS_DISABLED) {
3359 err = pci_enable_device(pdev);
3360 if (!err)
3361 dev->persist->pci_status = MLX4_PCI_STATUS_ENABLED;
3362 }
3363 mutex_unlock(&dev->persist->pci_status_mutex);
3364
3365 return err;
3366 }
3367
3368 static void mlx4_pci_disable_device(struct mlx4_dev *dev)
3369 {
3370 struct pci_dev *pdev = dev->persist->pdev;
3371
3372 mutex_lock(&dev->persist->pci_status_mutex);
3373 if (dev->persist->pci_status == MLX4_PCI_STATUS_ENABLED) {
3374 pci_disable_device(pdev);
3375 dev->persist->pci_status = MLX4_PCI_STATUS_DISABLED;
3376 }
3377 mutex_unlock(&dev->persist->pci_status_mutex);
3378 }
3379
3380 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
3381 int total_vfs, int *nvfs, struct mlx4_priv *priv,
3382 int reset_flow)
3383 {
3384 struct devlink *devlink = priv_to_devlink(priv);
3385 struct mlx4_dev *dev;
3386 unsigned sum = 0;
3387 int err;
3388 int port;
3389 int i;
3390 struct mlx4_dev_cap *dev_cap = NULL;
3391 int existing_vfs = 0;
3392
3393 devl_assert_locked(devlink);
3394 dev = &priv->dev;
3395
3396 err = mlx4_adev_init(dev);
3397 if (err)
3398 return err;
3399
3400 ATOMIC_INIT_NOTIFIER_HEAD(&priv->event_nh);
3401
3402 mutex_init(&priv->port_mutex);
3403 mutex_init(&priv->bond_mutex);
3404
3405 INIT_LIST_HEAD(&priv->pgdir_list);
3406 mutex_init(&priv->pgdir_mutex);
3407 spin_lock_init(&priv->cmd.context_lock);
3408
3409 INIT_LIST_HEAD(&priv->bf_list);
3410 mutex_init(&priv->bf_mutex);
3411
3412 dev->rev_id = pdev->revision;
3413 dev->numa_node = dev_to_node(&pdev->dev);
3414
3415 /* Detect if this device is a virtual function */
3416 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
3417 mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
3418 dev->flags |= MLX4_FLAG_SLAVE;
3419 } else {
3420 /* We reset the device and enable SRIOV only for physical
3421 * devices. Try to claim ownership on the device;
3422 * if already taken, skip -- do not allow multiple PFs */
3423 err = mlx4_get_ownership(dev);
3424 if (err) {
3425 if (err < 0)
3426 goto err_adev;
3427 else {
3428 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
3429 err = -EINVAL;
3430 goto err_adev;
3431 }
3432 }
3433
3434 atomic_set(&priv->opreq_count, 0);
3435 INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
3436
3437 /*
3438 * Now reset the HCA before we touch the PCI capabilities or
3439 * attempt a firmware command, since a boot ROM may have left
3440 * the HCA in an undefined state.
3441 */
3442 err = mlx4_reset(dev);
3443 if (err) {
3444 mlx4_err(dev, "Failed to reset HCA, aborting\n");
3445 goto err_sriov;
3446 }
3447
3448 if (total_vfs) {
3449 dev->flags = MLX4_FLAG_MASTER;
3450 existing_vfs = pci_num_vf(pdev);
3451 if (existing_vfs)
3452 dev->flags |= MLX4_FLAG_SRIOV;
3453 dev->persist->num_vfs = total_vfs;
3454 }
3455 }
3456
3457 /* on load remove any previous indication of internal error,
3458 * device is up.
3459 */
3460 dev->persist->state = MLX4_DEVICE_STATE_UP;
3461
3462 slave_start:
3463 err = mlx4_cmd_init(dev);
3464 if (err) {
3465 mlx4_err(dev, "Failed to init command interface, aborting\n");
3466 goto err_sriov;
3467 }
3468
3469 /* In slave functions, the communication channel must be initialized
3470 * before posting commands. Also, init num_slaves before calling
3471 * mlx4_init_hca */
3472 if (mlx4_is_mfunc(dev)) {
3473 if (mlx4_is_master(dev)) {
3474 dev->num_slaves = MLX4_MAX_NUM_SLAVES;
3475
3476 } else {
3477 dev->num_slaves = 0;
3478 err = mlx4_multi_func_init(dev);
3479 if (err) {
3480 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
3481 goto err_cmd;
3482 }
3483 }
3484 }
3485
3486 err = mlx4_init_fw(dev);
3487 if (err) {
3488 mlx4_err(dev, "Failed to init fw, aborting.\n");
3489 goto err_mfunc;
3490 }
3491
3492 if (mlx4_is_master(dev)) {
3493 /* when we hit the goto slave_start below, dev_cap is already initialized */
3494 if (!dev_cap) {
3495 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL);
3496
3497 if (!dev_cap) {
3498 err = -ENOMEM;
3499 goto err_fw;
3500 }
3501
3502 err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
3503 if (err) {
3504 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
3505 goto err_fw;
3506 }
3507
3508 if (mlx4_check_dev_cap(dev, dev_cap, nvfs))
3509 goto err_fw;
3510
			if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
				u64 dev_flags = mlx4_enable_sriov(dev, pdev,
								  total_vfs,
								  existing_vfs,
								  reset_flow);

				mlx4_close_fw(dev);
				mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
				dev->flags = dev_flags;
				if (!SRIOV_VALID_STATE(dev->flags)) {
					mlx4_err(dev, "Invalid SRIOV state\n");
					goto err_sriov;
				}
				err = mlx4_reset(dev);
				if (err) {
					mlx4_err(dev, "Failed to reset HCA, aborting.\n");
					goto err_sriov;
				}
				goto slave_start;
			}
		} else {
			/* Legacy mode FW requires SRIOV to be enabled before
			 * doing QUERY_DEV_CAP, since max_eq's value is different if
			 * SRIOV is enabled.
			 */
			memset(dev_cap, 0, sizeof(*dev_cap));
			err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
			if (err) {
				mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
				goto err_fw;
			}

			err = mlx4_check_dev_cap(dev, dev_cap, nvfs);
			if (err)
				goto err_fw;
		}
	}

	err = mlx4_init_hca(dev);
	if (err) {
		if (err == -EACCES) {
			/* Not the primary physical function; run in
			 * slave mode.
			 */
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
			/* We're not a PF */
			if (dev->flags & MLX4_FLAG_SRIOV) {
				if (!existing_vfs)
					pci_disable_sriov(pdev);
				if (mlx4_is_master(dev) && !reset_flow)
					atomic_dec(&pf_loading);
				dev->flags &= ~MLX4_FLAG_SRIOV;
			}
			if (!mlx4_is_slave(dev))
				mlx4_free_ownership(dev);
			dev->flags |= MLX4_FLAG_SLAVE;
			dev->flags &= ~MLX4_FLAG_MASTER;
			goto slave_start;
		} else
			goto err_fw;
	}

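	/*
	 * With SYS_EQS firmware, SR-IOV can be enabled after INIT_HCA; if the
	 * master/slave role changed as a result, only the VHCR part of the
	 * command interface needs to be rebuilt.
	 */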
	if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) {
		u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs,
						  existing_vfs, reset_flow);

		if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) {
			mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR);
			dev->flags = dev_flags;
			err = mlx4_cmd_init(dev);
			if (err) {
				/* Only VHCR is cleaned up, so could still
				 * send FW commands
				 */
				mlx4_err(dev, "Failed to init VHCR command interface, aborting\n");
				goto err_close;
			}
		} else {
			dev->flags = dev_flags;
		}

		if (!SRIOV_VALID_STATE(dev->flags)) {
			mlx4_err(dev, "Invalid SRIOV state\n");
			err = -EINVAL;
			goto err_close;
		}
	}

	/* Check if the device is functioning at its maximum possible speed.
	 * There is no return code for this call; just warn the user if the
	 * PCI Express capabilities of the device are under-satisfied by the
	 * bus.
	 */
	if (!mlx4_is_slave(dev))
		pcie_print_link_status(dev->persist->pdev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */
	if (mlx4_is_master(dev)) {
		if (dev->caps.num_ports < 2 && num_vfs_argc > 1) {
			err = -EINVAL;
			mlx4_err(dev,
				 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n",
				 dev->caps.num_ports);
			goto err_close;
		}
		memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs));

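		/*
		 * nvfs[0] and nvfs[1] count single-port VFs bound to port 1
		 * and port 2 respectively; nvfs[2] counts dual-port VFs,
		 * which start at port 1 and span all ports.
		 */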
		for (i = 0; i < ARRAY_SIZE(dev->persist->nvfs); i++) {
			unsigned j;

			for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) {
				dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1;
				dev->dev_vfs[sum].n_ports = i < 2 ? 1 :
					dev->caps.num_ports;
			}
		}

		err = mlx4_multi_func_init(dev);
		if (err) {
			mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n");
			goto err_close;
		}
	}

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_master_mfunc;

	bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
	mutex_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);
	if (mlx4_is_mfunc(dev) && !(dev->flags & MLX4_FLAG_MSI_X)) {
		err = -EOPNOTSUPP;
		mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
		goto err_free_eq;
	}

	if (!mlx4_is_slave(dev)) {
		err = mlx4_init_steering(dev);
		if (err)
			goto err_disable_msix;
	}

	mlx4_init_quotas(dev);

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
	    !mlx4_is_mfunc(dev)) {
		dev->flags &= ~MLX4_FLAG_MSI_X;
		dev->caps.num_comp_vectors = 1;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	/* When PF resources are ready, arm its comm channel to enable
	 * getting commands
	 */
	if (mlx4_is_master(dev)) {
		err = mlx4_ARM_COMM_CHANNEL(dev);
		if (err) {
			mlx4_err(dev, "Failed to arm comm channel eq: %x\n",
				 err);
			goto err_steer;
		}
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	priv->v2p.port1 = 1;
	priv->v2p.port2 = 2;

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	priv->removed = 0;

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(dev_cap);
	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);

err_disable_msix:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_master_mfunc:
	if (mlx4_is_master(dev)) {
		mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY);
		mlx4_multi_func_cleanup(dev);
	}

	if (mlx4_is_slave(dev))
		mlx4_slave_destroy_special_qp_cap(dev);

err_close:
	mlx4_close_hca(dev);

err_fw:
	mlx4_close_fw(dev);

err_mfunc:
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);

err_cmd:
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

err_sriov:
	if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) {
		pci_disable_sriov(pdev);
		dev->flags &= ~MLX4_FLAG_SRIOV;
	}

	if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow)
		atomic_dec(&pf_loading);

	kfree(priv->dev.dev_vfs);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	kfree(dev_cap);

err_adev:
	mlx4_adev_cleanup(dev);
	return err;
}

static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data,
			   struct mlx4_priv *priv)
{
	int err;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0};
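	/*
	 * param_map translates how many comma-separated values were given
	 * for the num_vfs/probe_vf module parameters into nvfs[]/prb_vf[]
	 * slots: a single value configures dual-port VFs (slot 2), while two
	 * or three values map, in order, to port 1, port 2 and both ports.
	 * For example, num_vfs=1,2,3 requests one VF on port 1, two on
	 * port 2 and three dual-port VFs.
	 */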
	const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = {
		{2, 0, 0}, {0, 1, 2}, {0, 1, 2} };
	unsigned total_vfs = 0;
	unsigned int i;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = mlx4_pci_enable_device(&priv->dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		return err;
	}

	/* Since all VFs and the PF are *guaranteed* 2 MACs per port, we must
	 * limit the number of VFs to 63 (since there are 128 MACs).
	 */
	for (i = 0; i < ARRAY_SIZE(nvfs) && i < num_vfs_argc;
	     total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) {
		nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i];
		if (nvfs[i] < 0) {
			dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	for (i = 0; i < ARRAY_SIZE(prb_vf) && i < probe_vfs_argc; i++) {
		prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i];
		if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) {
			dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n");
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}
	if (total_vfs > MLX4_MAX_NUM_VF) {
		dev_err(&pdev->dev,
			"Requested more VFs (%d) than allowed by hw (%d)\n",
			total_vfs, MLX4_MAX_NUM_VF);
		err = -EINVAL;
		goto err_disable_pdev;
	}

	for (i = 0; i < MLX4_MAX_PORTS; i++) {
		if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) {
			dev_err(&pdev->dev,
				"Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n",
				nvfs[i] + nvfs[2], i + 1,
				MLX4_MAX_NUM_VF_P_PORT);
			err = -EINVAL;
			goto err_disable_pdev;
		}
	}

	/* Check for BARs. */
	if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
	    !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
			pci_dev_data, pci_resource_flags(pdev, 0));
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
	/* Detect if this device is a virtual function */
	if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
		/* When acting as PF, we normally skip VFs unless explicitly
		 * requested to probe them.
		 */
		if (total_vfs) {
			unsigned vfs_offset = 0;

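			/*
			 * Walk the per-slot VF counts to find which nvfs[]
			 * slot this VF's extended function number falls in;
			 * probe it only if it is within that slot's probe_vf
			 * quota.
			 */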
			for (i = 0; i < ARRAY_SIZE(nvfs) &&
			     vfs_offset + nvfs[i] < extended_func_num(pdev);
			     vfs_offset += nvfs[i], i++)
				;
			if (i == ARRAY_SIZE(nvfs)) {
				err = -ENODEV;
				goto err_release_regions;
			}
			if ((extended_func_num(pdev) - vfs_offset)
			    > prb_vf[i]) {
				dev_warn(&pdev->dev, "Skipping virtual function:%d\n",
					 extended_func_num(pdev));
				err = -ENODEV;
				goto err_release_regions;
			}
		}
	}

	err = mlx4_crdump_init(&priv->dev);
	if (err)
		goto err_release_regions;

	err = mlx4_catas_init(&priv->dev);
	if (err)
		goto err_crdump;

	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0);
	if (err)
		goto err_catas;

	return 0;

err_catas:
	mlx4_catas_end(&priv->dev);

err_crdump:
	mlx4_crdump_end(&priv->dev);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	mlx4_pci_disable_device(&priv->dev);
	return err;
}

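/* Propagate devlink "driverinit" parameter values into the module-level
 * configuration before the device is reloaded, and notify devlink about
 * any value that differs from the running configuration.
 */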
static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_fw_crdump *crdump = &dev->persist->crdump;
	union devlink_param_value saved_value;
	int err;

	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
					      &saved_value);
	if (!err && mlx4_internal_err_reset != saved_value.vbool) {
		mlx4_internal_err_reset = saved_value.vbool;
		/* Notify on value changed on runtime configuration mode */
		devl_param_value_changed(devlink,
					 DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET);
	}
	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
					      &saved_value);
	if (!err)
		log_num_mac = order_base_2(saved_value.vu32);
	err = devl_param_driverinit_value_get(devlink,
					      MLX4_DEVLINK_PARAM_ID_ENABLE_64B_CQE_EQE,
					      &saved_value);
	if (!err)
		enable_64b_cqe_eqe = saved_value.vbool;
	err = devl_param_driverinit_value_get(devlink,
					      MLX4_DEVLINK_PARAM_ID_ENABLE_4K_UAR,
					      &saved_value);
	if (!err)
		enable_4k_uar = saved_value.vbool;
	err = devl_param_driverinit_value_get(devlink,
					      DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
					      &saved_value);
	if (!err && crdump->snapshot_enable != saved_value.vbool) {
		crdump->snapshot_enable = saved_value.vbool;
		devl_param_value_changed(devlink,
					 DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT);
	}
}

static void mlx4_restart_one_down(struct pci_dev *pdev);
static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
			       struct devlink *devlink);

static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
				    enum devlink_reload_action action,
				    enum devlink_reload_limit limit,
				    struct netlink_ext_ack *extack)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_dev_persistent *persist = dev->persist;

	if (netns_change) {
		NL_SET_ERR_MSG_MOD(extack, "Namespace change is not supported");
		return -EOPNOTSUPP;
	}
	if (persist->num_vfs)
		mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
	mlx4_restart_one_down(persist->pdev);
	return 0;
}

static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
				  enum devlink_reload_limit limit, u32 *actions_performed,
				  struct netlink_ext_ack *extack)
{
	struct mlx4_priv *priv = devlink_priv(devlink);
	struct mlx4_dev *dev = &priv->dev;
	struct mlx4_dev_persistent *persist = dev->persist;
	int err;

	*actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
	err = mlx4_restart_one_up(persist->pdev, true, devlink);
	if (err)
		mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
			 err);

	return err;
}

static const struct devlink_ops mlx4_devlink_ops = {
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
	.reload_down = mlx4_devlink_reload_down,
	.reload_up = mlx4_devlink_reload_up,
};

static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct devlink *devlink;
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int ret;

	printk_once(KERN_INFO "%s", mlx4_version);

	devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev);
	if (!devlink)
		return -ENOMEM;
	devl_lock(devlink);
	priv = devlink_priv(devlink);

	dev = &priv->dev;
	dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL);
	if (!dev->persist) {
		ret = -ENOMEM;
		goto err_devlink_free;
	}
	dev->persist->pdev = pdev;
	dev->persist->dev = dev;
	pci_set_drvdata(pdev, dev->persist);
	priv->pci_dev_data = id->driver_data;
	mutex_init(&dev->persist->device_state_mutex);
	mutex_init(&dev->persist->interface_state_mutex);
	mutex_init(&dev->persist->pci_status_mutex);

	ret = devl_params_register(devlink, mlx4_devlink_params,
				   ARRAY_SIZE(mlx4_devlink_params));
	if (ret)
		goto err_devlink_unregister;
	mlx4_devlink_set_params_init_values(devlink);
	ret = __mlx4_init_one(pdev, id->driver_data, priv);
	if (ret)
		goto err_params_unregister;

	pci_save_state(pdev);
	devl_unlock(devlink);
	devlink_register(devlink);
	return 0;

err_params_unregister:
	devl_params_unregister(devlink, mlx4_devlink_params,
			       ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
	kfree(dev->persist);
err_devlink_free:
	devl_unlock(devlink);
	devlink_free(devlink);
	return ret;
}

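/* Reset the driver-private state between unload and reload, preserving the
 * persistent part (dev->persist) and the flags that must survive a reset.
 */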
static void mlx4_clean_dev(struct mlx4_dev *dev)
{
	struct mlx4_dev_persistent *persist = dev->persist;
	struct mlx4_priv *priv = mlx4_priv(dev);
	unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS);

	memset(priv, 0, sizeof(*priv));
	priv->dev.persist = persist;
	priv->dev.flags = flags;
}

static void mlx4_unload_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pci_dev_data;
	struct devlink *devlink;
	int p, i;

	devlink = priv_to_devlink(priv);
	devl_assert_locked(devlink);
	if (priv->removed)
		return;

	/* Save the current port types for a later restore. */
	for (i = 0; i < dev->caps.num_ports; i++) {
		dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1];
		dev->persist->curr_port_poss_type[i] =
			dev->caps.possible_type[i + 1];
	}

	pci_dev_data = priv->pci_dev_data;

	mlx4_stop_sense(dev);
	mlx4_unregister_device(dev);

	for (p = 1; p <= dev->caps.num_ports; p++) {
		mlx4_cleanup_port_info(&priv->port[p]);
		mlx4_CLOSE_PORT(dev, p);
	}

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_SLAVES_ONLY);

	mlx4_cleanup_default_counters(dev);
	if (!mlx4_is_slave(dev))
		mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_xrcd_table(dev);
	mlx4_cleanup_pd_table(dev);

	if (mlx4_is_master(dev))
		mlx4_free_resource_tracker(dev,
					   RES_TR_FREE_STRUCTS_ONLY);

	iounmap(priv->kar);
	mlx4_uar_free(dev, &priv->driver_uar);
	mlx4_cleanup_uar_table(dev);
	if (!mlx4_is_slave(dev))
		mlx4_clear_steering(dev);
	mlx4_free_eq_table(dev);
	if (mlx4_is_master(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_close_hca(dev);
	mlx4_close_fw(dev);
	if (mlx4_is_slave(dev))
		mlx4_multi_func_cleanup(dev);
	mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);

	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	if (!mlx4_is_slave(dev))
		mlx4_free_ownership(dev);

	mlx4_slave_destroy_special_qp_cap(dev);
	kfree(dev->dev_vfs);

	mlx4_adev_cleanup(dev);

	mlx4_clean_dev(dev);
	priv->pci_dev_data = pci_dev_data;
	priv->removed = 1;
}

static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct devlink *devlink = priv_to_devlink(priv);
	int active_vfs = 0;

	devlink_unregister(devlink);

	devl_lock(devlink);
	if (mlx4_is_slave(dev))
		persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/* Disabling SR-IOV is not allowed while there are active VFs */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF while there are active VFs!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* The device is now marked for deletion; proceed without holding the
	 * interface-state lock so other tasks are able to terminate.
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	mlx4_crdump_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	mlx4_pci_disable_device(dev);
	devl_params_unregister(devlink, mlx4_devlink_params,
			       ARRAY_SIZE(mlx4_devlink_params));
	kfree(dev->persist);
	devl_unlock(devlink);
	devlink_free(devlink);
}

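/* Reapply the port types saved by mlx4_unload_one() so that a restart or
 * resume comes back with the same IB/Ethernet port configuration.
 */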
static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mlx4_start_sense(dev);
	mutex_unlock(&priv->port_mutex);

	return err;
}

static void mlx4_restart_one_down(struct pci_dev *pdev)
{
	mlx4_unload_one(pdev);
}

static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
			       struct devlink *devlink)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	if (reload)
		mlx4_devlink_param_load_driverinit_values(devlink);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_restart_one_down(pdev);
	return mlx4_restart_one_up(pdev, false, NULL);
}

#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }

static const struct pci_device_id mlx4_pci_table[] = {
#ifdef CONFIG_MLX4_CORE_GEN2
	/* MT25408 "Hermon" */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_SDR),	/* SDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR),	/* DDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR),	/* QDR */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2),	/* DDR Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2),	/* QDR Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN),	/* EN 10GigE */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2),	/* EN 10GigE Gen2 */
	/* MT25458 ConnectX EN 10GBASE-T */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN),
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2),	/* Gen2 */
	/* MT26468 ConnectX EN 10GigE PCIe Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2),
	/* MT26438 ConnectX EN 40GigE PCIe Gen2 5GT/s */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2),
	/* MT26478 ConnectX2 40GigE PCIe Gen2 */
	MLX_SP(PCI_DEVICE_ID_MELLANOX_CONNECTX2),
	/* MT25400 Family [ConnectX-2] */
	MLX_VF(0x1002),					/* Virtual Function */
#endif /* CONFIG_MLX4_CORE_GEN2 */
	/* MT27500 Family [ConnectX-3] */
	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3),
	MLX_VF(0x1004),					/* Virtual Function */
	MLX_GN(0x1005),					/* MT27510 Family */
	MLX_GN(0x1006),					/* MT27511 Family */
	MLX_GN(PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO),	/* MT27520 Family */
	MLX_GN(0x1008),					/* MT27521 Family */
	MLX_GN(0x1009),					/* MT27530 Family */
	MLX_GN(0x100a),					/* MT27531 Family */
	MLX_GN(0x100b),					/* MT27540 Family */
	MLX_GN(0x100c),					/* MT27541 Family */
	MLX_GN(0x100d),					/* MT27550 Family */
	MLX_GN(0x100e),					/* MT27551 Family */
	MLX_GN(0x100f),					/* MT27560 Family */
	MLX_GN(0x1010),					/* MT27561 Family */

	/*
	 * See the mellanox_check_broken_intx_masking() quirk when
	 * adding devices
	 */

	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);

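/* PCI error-recovery (AER) flow: mlx4_pci_err_detected() tears the device
 * down and requests a slot reset, mlx4_pci_slot_reset() re-enables the PCI
 * function, and mlx4_pci_resume() reloads the driver state.
 */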
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct devlink *devlink;

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	devlink = priv_to_devlink(mlx4_priv(dev));
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);

	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	mlx4_pci_disable_device(persist->dev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	int err;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	err = mlx4_pci_enable_device(dev);
	if (err) {
		mlx4_err(dev, "Cannot re-enable device, err=%d\n", err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	return PCI_ERS_RESULT_RECOVERED;
}

static void mlx4_pci_resume(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	struct devlink *devlink;
	int total_vfs;
	int err;

	mlx4_err(dev, "%s was called\n", __func__);
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	devlink = priv_to_devlink(priv);
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		err = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (err) {
			mlx4_err(dev, "%s: mlx4_load_one failed, err=%d\n",
				 __func__, err);
			goto end;
		}

		err = restore_current_port_types(dev,
						 dev->persist->curr_port_type,
						 dev->persist->curr_port_poss_type);
		if (err)
			mlx4_err(dev, "could not restore original port types (%d)\n", err);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);
}

static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct devlink *devlink;

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	devlink = priv_to_devlink(mlx4_priv(dev));
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);
	mlx4_pci_disable_device(dev);
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected = mlx4_pci_err_detected,
	.slot_reset = mlx4_pci_slot_reset,
	.resume = mlx4_pci_resume,
};

static int __maybe_unused mlx4_suspend(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct devlink *devlink;

	mlx4_err(dev, "suspend was called\n");
	devlink = priv_to_devlink(mlx4_priv(dev));
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);

	return 0;
}

static int __maybe_unused mlx4_resume(struct device *dev_d)
{
	struct pci_dev *pdev = to_pci_dev(dev_d);
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	struct devlink *devlink;
	int total_vfs;
	int ret = 0;

	mlx4_err(dev, "resume was called\n");
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	devlink = priv_to_devlink(priv);
	devl_lock(devlink);
	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs,
				    nvfs, priv, 1);
		if (!ret) {
			ret = restore_current_port_types(dev,
					dev->persist->curr_port_type,
					dev->persist->curr_port_poss_type);
			if (ret)
				mlx4_err(dev, "resume: could not restore original port types (%d)\n", ret);
		}
	}
	mutex_unlock(&persist->interface_state_mutex);
	devl_unlock(devlink);

	return ret;
}

static SIMPLE_DEV_PM_OPS(mlx4_pm_ops, mlx4_suspend, mlx4_resume);

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_shutdown,
	.remove		= mlx4_remove_one,
	.driver.pm	= &mlx4_pm_ops,
	.err_handler	= &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if (msi_x < 0) {
		pr_warn("mlx4_core: bad msi_x: %d\n", msi_x);
		return -1;
	}

	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad log_num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 0) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check that the port_type_array module parameter is a legal combination */
	if (!port_type_array[0] && port_type_array[1]) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	WARN_ONCE(strcmp(MLX4_ADEV_NAME, KBUILD_MODNAME),
		  "mlx4_core name not in sync with kernel module name");

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);