1 /* 2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. 4 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved. 5 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. 6 * 7 * This software is available to you under a choice of one of two 8 * licenses. You may choose to be licensed under the terms of the GNU 9 * General Public License (GPL) Version 2, available from the file 10 * COPYING in the main directory of this source tree, or the 11 * OpenIB.org BSD license below: 12 * 13 * Redistribution and use in source and binary forms, with or 14 * without modification, are permitted provided that the following 15 * conditions are met: 16 * 17 * - Redistributions of source code must retain the above 18 * copyright notice, this list of conditions and the following 19 * disclaimer. 20 * 21 * - Redistributions in binary form must reproduce the above 22 * copyright notice, this list of conditions and the following 23 * disclaimer in the documentation and/or other materials 24 * provided with the distribution. 25 * 26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 33 * SOFTWARE. 34 */ 35 36 #include <linux/module.h> 37 #include <linux/init.h> 38 #include <linux/errno.h> 39 #include <linux/pci.h> 40 #include <linux/dma-mapping.h> 41 #include <linux/slab.h> 42 #include <linux/io-mapping.h> 43 #include <linux/delay.h> 44 45 #include <linux/mlx4/device.h> 46 #include <linux/mlx4/doorbell.h> 47 48 #include "mlx4.h" 49 #include "fw.h" 50 #include "icm.h" 51 52 MODULE_AUTHOR("Roland Dreier"); 53 MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver"); 54 MODULE_LICENSE("Dual BSD/GPL"); 55 MODULE_VERSION(DRV_VERSION); 56 57 struct workqueue_struct *mlx4_wq; 58 59 #ifdef CONFIG_MLX4_DEBUG 60 61 int mlx4_debug_level = 0; 62 module_param_named(debug_level, mlx4_debug_level, int, 0644); 63 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); 64 65 #endif /* CONFIG_MLX4_DEBUG */ 66 67 #ifdef CONFIG_PCI_MSI 68 69 static int msi_x = 1; 70 module_param(msi_x, int, 0444); 71 MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero"); 72 73 #else /* CONFIG_PCI_MSI */ 74 75 #define msi_x (0) 76 77 #endif /* CONFIG_PCI_MSI */ 78 79 static int num_vfs; 80 module_param(num_vfs, int, 0444); 81 MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0"); 82 83 static int probe_vf; 84 module_param(probe_vf, int, 0644); 85 MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)"); 86 87 int mlx4_log_num_mgm_entry_size = 10; 88 module_param_named(log_num_mgm_entry_size, 89 mlx4_log_num_mgm_entry_size, int, 0444); 90 MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" 91 " of qp per mcg, for example:" 92 " 10 gives 248.range: 9<=" 93 " log_num_mgm_entry_size <= 12"); 94 95 #define MLX4_VF (1 << 0) 96 97 #define HCA_GLOBAL_CAP_MASK 0 98 #define PF_CONTEXT_BEHAVIOUR_MASK 0 99 100 static char mlx4_version[] __devinitdata = 101 DRV_NAME ": Mellanox ConnectX core driver v" 102 DRV_VERSION " (" DRV_RELDATE ")\n"; 103 104 static struct mlx4_profile default_profile = { 105 .num_qp = 1 << 18, 106 .num_srq = 1 << 16, 107 .rdmarc_per_qp = 1 << 4, 108 .num_cq = 1 << 16, 109 .num_mcg = 1 << 13, 110 .num_mpt = 1 << 19, 111 .num_mtt = 1 << 20, 112 }; 113 114 static int log_num_mac = 7; 115 module_param_named(log_num_mac, log_num_mac, int, 0444); 116 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)"); 117 118 static int log_num_vlan; 119 module_param_named(log_num_vlan, log_num_vlan, int, 0444); 120 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)"); 121 /* Log2 max number of VLANs per ETH port (0-7) */ 122 #define MLX4_LOG_NUM_VLANS 7 123 124 static bool use_prio; 125 module_param_named(use_prio, use_prio, bool, 0444); 126 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports " 127 "(0/1, default 0)"); 128 129 int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG); 130 module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444); 131 MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)"); 132 133 static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE}; 134 static int arr_argc = 2; 135 module_param_array(port_type_array, int, &arr_argc, 0444); 136 MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " 137 "1 for IB, 2 for Ethernet"); 138 139 struct mlx4_port_config { 140 struct list_head list; 141 enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; 142 struct pci_dev *pdev; 143 }; 144 145 static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev) 146 { 147 return dev->caps.reserved_eqs + 148 MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1); 149 } 150 151 int mlx4_check_port_params(struct mlx4_dev *dev, 152 enum mlx4_port_type *port_type) 153 { 154 int i; 155 156 for (i = 0; i < dev->caps.num_ports - 1; i++) { 157 if (port_type[i] != port_type[i + 1]) { 158 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) { 159 mlx4_err(dev, "Only same port types supported " 160 "on this HCA, aborting.\n"); 161 return -EINVAL; 162 } 163 if (port_type[i] == MLX4_PORT_TYPE_ETH && 164 port_type[i + 1] == MLX4_PORT_TYPE_IB) 165 return -EINVAL; 166 } 167 } 168 169 for (i = 0; i < dev->caps.num_ports; i++) { 170 if (!(port_type[i] & dev->caps.supported_type[i+1])) { 171 mlx4_err(dev, "Requested port type for port %d is not " 172 "supported on this HCA\n", i + 1); 173 return -EINVAL; 174 } 175 } 176 return 0; 177 } 178 179 static void mlx4_set_port_mask(struct mlx4_dev *dev) 180 { 181 int i; 182 183 for (i = 1; i <= dev->caps.num_ports; ++i) 184 dev->caps.port_mask[i] = dev->caps.port_type[i]; 185 } 186 187 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) 188 { 189 int err; 190 int i; 191 192 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 193 if (err) { 194 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 195 return err; 196 } 197 198 if (dev_cap->min_page_sz > PAGE_SIZE) { 199 mlx4_err(dev, "HCA minimum page size of %d bigger than " 200 "kernel PAGE_SIZE of %ld, aborting.\n", 201 dev_cap->min_page_sz, PAGE_SIZE); 202 return -ENODEV; 203 } 204 if (dev_cap->num_ports > MLX4_MAX_PORTS) { 205 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 206 "aborting.\n", 207 dev_cap->num_ports, MLX4_MAX_PORTS); 208 return -ENODEV; 209 } 210 211 if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) { 212 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than " 213 "PCI resource 2 size of 0x%llx, aborting.\n", 214 dev_cap->uar_size, 215 (unsigned long long) pci_resource_len(dev->pdev, 2)); 216 return -ENODEV; 217 } 218 219 dev->caps.num_ports = dev_cap->num_ports; 220 for (i = 1; i <= dev->caps.num_ports; ++i) { 221 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 222 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 223 dev->caps.gid_table_len[i] = dev_cap->max_gids[i]; 224 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i]; 225 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i]; 226 dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i]; 227 dev->caps.def_mac[i] = dev_cap->def_mac[i]; 228 dev->caps.supported_type[i] = dev_cap->supported_port_types[i]; 229 dev->caps.suggested_type[i] = dev_cap->suggested_type[i]; 230 dev->caps.default_sense[i] = dev_cap->default_sense[i]; 231 dev->caps.trans_type[i] = dev_cap->trans_type[i]; 232 dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i]; 233 dev->caps.wavelength[i] = dev_cap->wavelength[i]; 234 dev->caps.trans_code[i] = dev_cap->trans_code[i]; 235 } 236 237 dev->caps.uar_page_size = PAGE_SIZE; 238 dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE; 239 dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay; 240 dev->caps.bf_reg_size = dev_cap->bf_reg_size; 241 dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page; 242 dev->caps.max_sq_sg = dev_cap->max_sq_sg; 243 dev->caps.max_rq_sg = dev_cap->max_rq_sg; 244 dev->caps.max_wqes = dev_cap->max_qp_sz; 245 dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp; 246 dev->caps.max_srq_wqes = dev_cap->max_srq_sz; 247 dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1; 248 dev->caps.reserved_srqs = dev_cap->reserved_srqs; 249 dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; 250 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 251 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 252 /* 253 * Subtract 1 from the limit because we need to allocate a 254 * spare CQE so the HCA HW can tell the difference between an 255 * empty CQ and a full CQ. 256 */ 257 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 258 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 259 dev->caps.reserved_eqs = dev_cap->reserved_eqs; 260 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 261 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 262 263 /* The first 128 UARs are used for EQ doorbells */ 264 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars); 265 dev->caps.reserved_pds = dev_cap->reserved_pds; 266 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 267 dev_cap->reserved_xrcds : 0; 268 dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 269 dev_cap->max_xrcds : 0; 270 dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz; 271 272 dev->caps.max_msg_sz = dev_cap->max_msg_sz; 273 dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1); 274 dev->caps.flags = dev_cap->flags; 275 dev->caps.bmme_flags = dev_cap->bmme_flags; 276 dev->caps.reserved_lkey = dev_cap->reserved_lkey; 277 dev->caps.stat_rate_support = dev_cap->stat_rate_support; 278 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 279 280 /* Sense port always allowed on supported devices for ConnectX1 and 2 */ 281 if (dev->pdev->device != 0x1003) 282 dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; 283 284 dev->caps.log_num_macs = log_num_mac; 285 dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS; 286 dev->caps.log_num_prios = use_prio ? 3 : 0; 287 288 for (i = 1; i <= dev->caps.num_ports; ++i) { 289 dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE; 290 if (dev->caps.supported_type[i]) { 291 /* if only ETH is supported - assign ETH */ 292 if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH) 293 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 294 /* if only IB is supported, 295 * assign IB only if SRIOV is off*/ 296 else if (dev->caps.supported_type[i] == 297 MLX4_PORT_TYPE_IB) { 298 if (dev->flags & MLX4_FLAG_SRIOV) 299 dev->caps.port_type[i] = 300 MLX4_PORT_TYPE_NONE; 301 else 302 dev->caps.port_type[i] = 303 MLX4_PORT_TYPE_IB; 304 /* if IB and ETH are supported, 305 * first of all check if SRIOV is on */ 306 } else if (dev->flags & MLX4_FLAG_SRIOV) 307 dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH; 308 else { 309 /* In non-SRIOV mode, we set the port type 310 * according to user selection of port type, 311 * if usere selected none, take the FW hint */ 312 if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE) 313 dev->caps.port_type[i] = dev->caps.suggested_type[i] ? 314 MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB; 315 else 316 dev->caps.port_type[i] = port_type_array[i-1]; 317 } 318 } 319 /* 320 * Link sensing is allowed on the port if 3 conditions are true: 321 * 1. Both protocols are supported on the port. 322 * 2. Different types are supported on the port 323 * 3. FW declared that it supports link sensing 324 */ 325 mlx4_priv(dev)->sense.sense_allowed[i] = 326 ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) && 327 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 328 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)); 329 330 /* 331 * If "default_sense" bit is set, we move the port to "AUTO" mode 332 * and perform sense_port FW command to try and set the correct 333 * port type from beginning 334 */ 335 if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) { 336 enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE; 337 dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO; 338 mlx4_SENSE_PORT(dev, i, &sensed_port); 339 if (sensed_port != MLX4_PORT_TYPE_NONE) 340 dev->caps.port_type[i] = sensed_port; 341 } else { 342 dev->caps.possible_type[i] = dev->caps.port_type[i]; 343 } 344 345 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) { 346 dev->caps.log_num_macs = dev_cap->log_max_macs[i]; 347 mlx4_warn(dev, "Requested number of MACs is too much " 348 "for port %d, reducing to %d.\n", 349 i, 1 << dev->caps.log_num_macs); 350 } 351 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) { 352 dev->caps.log_num_vlans = dev_cap->log_max_vlans[i]; 353 mlx4_warn(dev, "Requested number of VLANs is too much " 354 "for port %d, reducing to %d.\n", 355 i, 1 << dev->caps.log_num_vlans); 356 } 357 } 358 359 dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters); 360 361 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps; 362 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] = 363 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] = 364 (1 << dev->caps.log_num_macs) * 365 (1 << dev->caps.log_num_vlans) * 366 (1 << dev->caps.log_num_prios) * 367 dev->caps.num_ports; 368 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH; 369 370 dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] + 371 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] + 372 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] + 373 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH]; 374 375 return 0; 376 } 377 /*The function checks if there are live vf, return the num of them*/ 378 static int mlx4_how_many_lives_vf(struct mlx4_dev *dev) 379 { 380 struct mlx4_priv *priv = mlx4_priv(dev); 381 struct mlx4_slave_state *s_state; 382 int i; 383 int ret = 0; 384 385 for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) { 386 s_state = &priv->mfunc.master.slave_state[i]; 387 if (s_state->active && s_state->last_cmd != 388 MLX4_COMM_CMD_RESET) { 389 mlx4_warn(dev, "%s: slave: %d is still active\n", 390 __func__, i); 391 ret++; 392 } 393 } 394 return ret; 395 } 396 397 static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave) 398 { 399 struct mlx4_priv *priv = mlx4_priv(dev); 400 struct mlx4_slave_state *s_slave; 401 402 if (!mlx4_is_master(dev)) 403 return 0; 404 405 s_slave = &priv->mfunc.master.slave_state[slave]; 406 return !!s_slave->active; 407 } 408 EXPORT_SYMBOL(mlx4_is_slave_active); 409 410 static int mlx4_slave_cap(struct mlx4_dev *dev) 411 { 412 int err; 413 u32 page_size; 414 struct mlx4_dev_cap dev_cap; 415 struct mlx4_func_cap func_cap; 416 struct mlx4_init_hca_param hca_param; 417 int i; 418 419 memset(&hca_param, 0, sizeof(hca_param)); 420 err = mlx4_QUERY_HCA(dev, &hca_param); 421 if (err) { 422 mlx4_err(dev, "QUERY_HCA command failed, aborting.\n"); 423 return err; 424 } 425 426 /*fail if the hca has an unknown capability */ 427 if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) != 428 HCA_GLOBAL_CAP_MASK) { 429 mlx4_err(dev, "Unknown hca global capabilities\n"); 430 return -ENOSYS; 431 } 432 433 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 434 435 memset(&dev_cap, 0, sizeof(dev_cap)); 436 err = mlx4_dev_cap(dev, &dev_cap); 437 if (err) { 438 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 439 return err; 440 } 441 442 page_size = ~dev->caps.page_size_cap + 1; 443 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 444 if (page_size > PAGE_SIZE) { 445 mlx4_err(dev, "HCA minimum page size of %d bigger than " 446 "kernel PAGE_SIZE of %ld, aborting.\n", 447 page_size, PAGE_SIZE); 448 return -ENODEV; 449 } 450 451 /* slave gets uar page size from QUERY_HCA fw command */ 452 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 453 454 /* TODO: relax this assumption */ 455 if (dev->caps.uar_page_size != PAGE_SIZE) { 456 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 457 dev->caps.uar_page_size, PAGE_SIZE); 458 return -ENODEV; 459 } 460 461 memset(&func_cap, 0, sizeof(func_cap)); 462 err = mlx4_QUERY_FUNC_CAP(dev, &func_cap); 463 if (err) { 464 mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n"); 465 return err; 466 } 467 468 if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) != 469 PF_CONTEXT_BEHAVIOUR_MASK) { 470 mlx4_err(dev, "Unknown pf context behaviour\n"); 471 return -ENOSYS; 472 } 473 474 dev->caps.function = func_cap.function; 475 dev->caps.num_ports = func_cap.num_ports; 476 dev->caps.num_qps = func_cap.qp_quota; 477 dev->caps.num_srqs = func_cap.srq_quota; 478 dev->caps.num_cqs = func_cap.cq_quota; 479 dev->caps.num_eqs = func_cap.max_eq; 480 dev->caps.reserved_eqs = func_cap.reserved_eq; 481 dev->caps.num_mpts = func_cap.mpt_quota; 482 dev->caps.num_mtts = func_cap.mtt_quota; 483 dev->caps.num_pds = MLX4_NUM_PDS; 484 dev->caps.num_mgms = 0; 485 dev->caps.num_amgms = 0; 486 487 for (i = 1; i <= dev->caps.num_ports; ++i) 488 dev->caps.port_mask[i] = dev->caps.port_type[i]; 489 490 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 491 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 492 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 493 return -ENODEV; 494 } 495 496 if (dev->caps.uar_page_size * (dev->caps.num_uars - 497 dev->caps.reserved_uars) > 498 pci_resource_len(dev->pdev, 2)) { 499 mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than " 500 "PCI resource 2 size of 0x%llx, aborting.\n", 501 dev->caps.uar_page_size * dev->caps.num_uars, 502 (unsigned long long) pci_resource_len(dev->pdev, 2)); 503 return -ENODEV; 504 } 505 506 #if 0 507 mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux); 508 mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n", 509 dev->caps.num_uars, dev->caps.reserved_uars, 510 dev->caps.uar_page_size * dev->caps.num_uars, 511 pci_resource_len(dev->pdev, 2)); 512 mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs, 513 dev->caps.reserved_eqs); 514 mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n", 515 dev->caps.num_pds, dev->caps.reserved_pds, 516 dev->caps.slave_pd_shift, dev->caps.pd_base); 517 #endif 518 return 0; 519 } 520 521 /* 522 * Change the port configuration of the device. 523 * Every user of this function must hold the port mutex. 524 */ 525 int mlx4_change_port_types(struct mlx4_dev *dev, 526 enum mlx4_port_type *port_types) 527 { 528 int err = 0; 529 int change = 0; 530 int port; 531 532 for (port = 0; port < dev->caps.num_ports; port++) { 533 /* Change the port type only if the new type is different 534 * from the current, and not set to Auto */ 535 if (port_types[port] != dev->caps.port_type[port + 1]) { 536 change = 1; 537 dev->caps.port_type[port + 1] = port_types[port]; 538 } 539 } 540 if (change) { 541 mlx4_unregister_device(dev); 542 for (port = 1; port <= dev->caps.num_ports; port++) { 543 mlx4_CLOSE_PORT(dev, port); 544 err = mlx4_SET_PORT(dev, port); 545 if (err) { 546 mlx4_err(dev, "Failed to set port %d, " 547 "aborting\n", port); 548 goto out; 549 } 550 } 551 mlx4_set_port_mask(dev); 552 err = mlx4_register_device(dev); 553 } 554 555 out: 556 return err; 557 } 558 559 static ssize_t show_port_type(struct device *dev, 560 struct device_attribute *attr, 561 char *buf) 562 { 563 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 564 port_attr); 565 struct mlx4_dev *mdev = info->dev; 566 char type[8]; 567 568 sprintf(type, "%s", 569 (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ? 570 "ib" : "eth"); 571 if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO) 572 sprintf(buf, "auto (%s)\n", type); 573 else 574 sprintf(buf, "%s\n", type); 575 576 return strlen(buf); 577 } 578 579 static ssize_t set_port_type(struct device *dev, 580 struct device_attribute *attr, 581 const char *buf, size_t count) 582 { 583 struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info, 584 port_attr); 585 struct mlx4_dev *mdev = info->dev; 586 struct mlx4_priv *priv = mlx4_priv(mdev); 587 enum mlx4_port_type types[MLX4_MAX_PORTS]; 588 enum mlx4_port_type new_types[MLX4_MAX_PORTS]; 589 int i; 590 int err = 0; 591 592 if (!strcmp(buf, "ib\n")) 593 info->tmp_type = MLX4_PORT_TYPE_IB; 594 else if (!strcmp(buf, "eth\n")) 595 info->tmp_type = MLX4_PORT_TYPE_ETH; 596 else if (!strcmp(buf, "auto\n")) 597 info->tmp_type = MLX4_PORT_TYPE_AUTO; 598 else { 599 mlx4_err(mdev, "%s is not supported port type\n", buf); 600 return -EINVAL; 601 } 602 603 mlx4_stop_sense(mdev); 604 mutex_lock(&priv->port_mutex); 605 /* Possible type is always the one that was delivered */ 606 mdev->caps.possible_type[info->port] = info->tmp_type; 607 608 for (i = 0; i < mdev->caps.num_ports; i++) { 609 types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type : 610 mdev->caps.possible_type[i+1]; 611 if (types[i] == MLX4_PORT_TYPE_AUTO) 612 types[i] = mdev->caps.port_type[i+1]; 613 } 614 615 if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) && 616 !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) { 617 for (i = 1; i <= mdev->caps.num_ports; i++) { 618 if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { 619 mdev->caps.possible_type[i] = mdev->caps.port_type[i]; 620 err = -EINVAL; 621 } 622 } 623 } 624 if (err) { 625 mlx4_err(mdev, "Auto sensing is not supported on this HCA. " 626 "Set only 'eth' or 'ib' for both ports " 627 "(should be the same)\n"); 628 goto out; 629 } 630 631 mlx4_do_sense_ports(mdev, new_types, types); 632 633 err = mlx4_check_port_params(mdev, new_types); 634 if (err) 635 goto out; 636 637 /* We are about to apply the changes after the configuration 638 * was verified, no need to remember the temporary types 639 * any more */ 640 for (i = 0; i < mdev->caps.num_ports; i++) 641 priv->port[i + 1].tmp_type = 0; 642 643 err = mlx4_change_port_types(mdev, new_types); 644 645 out: 646 mlx4_start_sense(mdev); 647 mutex_unlock(&priv->port_mutex); 648 return err ? err : count; 649 } 650 651 static int mlx4_load_fw(struct mlx4_dev *dev) 652 { 653 struct mlx4_priv *priv = mlx4_priv(dev); 654 int err; 655 656 priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages, 657 GFP_HIGHUSER | __GFP_NOWARN, 0); 658 if (!priv->fw.fw_icm) { 659 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n"); 660 return -ENOMEM; 661 } 662 663 err = mlx4_MAP_FA(dev, priv->fw.fw_icm); 664 if (err) { 665 mlx4_err(dev, "MAP_FA command failed, aborting.\n"); 666 goto err_free; 667 } 668 669 err = mlx4_RUN_FW(dev); 670 if (err) { 671 mlx4_err(dev, "RUN_FW command failed, aborting.\n"); 672 goto err_unmap_fa; 673 } 674 675 return 0; 676 677 err_unmap_fa: 678 mlx4_UNMAP_FA(dev); 679 680 err_free: 681 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 682 return err; 683 } 684 685 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base, 686 int cmpt_entry_sz) 687 { 688 struct mlx4_priv *priv = mlx4_priv(dev); 689 int err; 690 int num_eqs; 691 692 err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table, 693 cmpt_base + 694 ((u64) (MLX4_CMPT_TYPE_QP * 695 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 696 cmpt_entry_sz, dev->caps.num_qps, 697 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 698 0, 0); 699 if (err) 700 goto err; 701 702 err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table, 703 cmpt_base + 704 ((u64) (MLX4_CMPT_TYPE_SRQ * 705 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 706 cmpt_entry_sz, dev->caps.num_srqs, 707 dev->caps.reserved_srqs, 0, 0); 708 if (err) 709 goto err_qp; 710 711 err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table, 712 cmpt_base + 713 ((u64) (MLX4_CMPT_TYPE_CQ * 714 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 715 cmpt_entry_sz, dev->caps.num_cqs, 716 dev->caps.reserved_cqs, 0, 0); 717 if (err) 718 goto err_srq; 719 720 num_eqs = (mlx4_is_master(dev)) ? 721 roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 722 dev->caps.num_eqs; 723 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 724 cmpt_base + 725 ((u64) (MLX4_CMPT_TYPE_EQ * 726 cmpt_entry_sz) << MLX4_CMPT_SHIFT), 727 cmpt_entry_sz, num_eqs, num_eqs, 0, 0); 728 if (err) 729 goto err_cq; 730 731 return 0; 732 733 err_cq: 734 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 735 736 err_srq: 737 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 738 739 err_qp: 740 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 741 742 err: 743 return err; 744 } 745 746 static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 747 struct mlx4_init_hca_param *init_hca, u64 icm_size) 748 { 749 struct mlx4_priv *priv = mlx4_priv(dev); 750 u64 aux_pages; 751 int num_eqs; 752 int err; 753 754 err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages); 755 if (err) { 756 mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n"); 757 return err; 758 } 759 760 mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n", 761 (unsigned long long) icm_size >> 10, 762 (unsigned long long) aux_pages << 2); 763 764 priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages, 765 GFP_HIGHUSER | __GFP_NOWARN, 0); 766 if (!priv->fw.aux_icm) { 767 mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n"); 768 return -ENOMEM; 769 } 770 771 err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm); 772 if (err) { 773 mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n"); 774 goto err_free_aux; 775 } 776 777 err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz); 778 if (err) { 779 mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n"); 780 goto err_unmap_aux; 781 } 782 783 784 num_eqs = (mlx4_is_master(dev)) ? 785 roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 786 dev->caps.num_eqs; 787 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 788 init_hca->eqc_base, dev_cap->eqc_entry_sz, 789 num_eqs, num_eqs, 0, 0); 790 if (err) { 791 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n"); 792 goto err_unmap_cmpt; 793 } 794 795 /* 796 * Reserved MTT entries must be aligned up to a cacheline 797 * boundary, since the FW will write to them, while the driver 798 * writes to all other MTT entries. (The variable 799 * dev->caps.mtt_entry_sz below is really the MTT segment 800 * size, not the raw entry size) 801 */ 802 dev->caps.reserved_mtts = 803 ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz, 804 dma_get_cache_alignment()) / dev->caps.mtt_entry_sz; 805 806 err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table, 807 init_hca->mtt_base, 808 dev->caps.mtt_entry_sz, 809 dev->caps.num_mtts, 810 dev->caps.reserved_mtts, 1, 0); 811 if (err) { 812 mlx4_err(dev, "Failed to map MTT context memory, aborting.\n"); 813 goto err_unmap_eq; 814 } 815 816 err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table, 817 init_hca->dmpt_base, 818 dev_cap->dmpt_entry_sz, 819 dev->caps.num_mpts, 820 dev->caps.reserved_mrws, 1, 1); 821 if (err) { 822 mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n"); 823 goto err_unmap_mtt; 824 } 825 826 err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table, 827 init_hca->qpc_base, 828 dev_cap->qpc_entry_sz, 829 dev->caps.num_qps, 830 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 831 0, 0); 832 if (err) { 833 mlx4_err(dev, "Failed to map QP context memory, aborting.\n"); 834 goto err_unmap_dmpt; 835 } 836 837 err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table, 838 init_hca->auxc_base, 839 dev_cap->aux_entry_sz, 840 dev->caps.num_qps, 841 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 842 0, 0); 843 if (err) { 844 mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n"); 845 goto err_unmap_qp; 846 } 847 848 err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table, 849 init_hca->altc_base, 850 dev_cap->altc_entry_sz, 851 dev->caps.num_qps, 852 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 853 0, 0); 854 if (err) { 855 mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n"); 856 goto err_unmap_auxc; 857 } 858 859 err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table, 860 init_hca->rdmarc_base, 861 dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift, 862 dev->caps.num_qps, 863 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 864 0, 0); 865 if (err) { 866 mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n"); 867 goto err_unmap_altc; 868 } 869 870 err = mlx4_init_icm_table(dev, &priv->cq_table.table, 871 init_hca->cqc_base, 872 dev_cap->cqc_entry_sz, 873 dev->caps.num_cqs, 874 dev->caps.reserved_cqs, 0, 0); 875 if (err) { 876 mlx4_err(dev, "Failed to map CQ context memory, aborting.\n"); 877 goto err_unmap_rdmarc; 878 } 879 880 err = mlx4_init_icm_table(dev, &priv->srq_table.table, 881 init_hca->srqc_base, 882 dev_cap->srq_entry_sz, 883 dev->caps.num_srqs, 884 dev->caps.reserved_srqs, 0, 0); 885 if (err) { 886 mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n"); 887 goto err_unmap_cq; 888 } 889 890 /* 891 * It's not strictly required, but for simplicity just map the 892 * whole multicast group table now. The table isn't very big 893 * and it's a lot easier than trying to track ref counts. 894 */ 895 err = mlx4_init_icm_table(dev, &priv->mcg_table.table, 896 init_hca->mc_base, 897 mlx4_get_mgm_entry_size(dev), 898 dev->caps.num_mgms + dev->caps.num_amgms, 899 dev->caps.num_mgms + dev->caps.num_amgms, 900 0, 0); 901 if (err) { 902 mlx4_err(dev, "Failed to map MCG context memory, aborting.\n"); 903 goto err_unmap_srq; 904 } 905 906 return 0; 907 908 err_unmap_srq: 909 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 910 911 err_unmap_cq: 912 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 913 914 err_unmap_rdmarc: 915 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 916 917 err_unmap_altc: 918 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 919 920 err_unmap_auxc: 921 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 922 923 err_unmap_qp: 924 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 925 926 err_unmap_dmpt: 927 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 928 929 err_unmap_mtt: 930 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 931 932 err_unmap_eq: 933 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 934 935 err_unmap_cmpt: 936 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 937 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 938 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 939 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 940 941 err_unmap_aux: 942 mlx4_UNMAP_ICM_AUX(dev); 943 944 err_free_aux: 945 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 946 947 return err; 948 } 949 950 static void mlx4_free_icms(struct mlx4_dev *dev) 951 { 952 struct mlx4_priv *priv = mlx4_priv(dev); 953 954 mlx4_cleanup_icm_table(dev, &priv->mcg_table.table); 955 mlx4_cleanup_icm_table(dev, &priv->srq_table.table); 956 mlx4_cleanup_icm_table(dev, &priv->cq_table.table); 957 mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table); 958 mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table); 959 mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table); 960 mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table); 961 mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table); 962 mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table); 963 mlx4_cleanup_icm_table(dev, &priv->eq_table.table); 964 mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table); 965 mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table); 966 mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table); 967 mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table); 968 969 mlx4_UNMAP_ICM_AUX(dev); 970 mlx4_free_icm(dev, priv->fw.aux_icm, 0); 971 } 972 973 static void mlx4_slave_exit(struct mlx4_dev *dev) 974 { 975 struct mlx4_priv *priv = mlx4_priv(dev); 976 977 down(&priv->cmd.slave_sem); 978 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME)) 979 mlx4_warn(dev, "Failed to close slave function.\n"); 980 up(&priv->cmd.slave_sem); 981 } 982 983 static int map_bf_area(struct mlx4_dev *dev) 984 { 985 struct mlx4_priv *priv = mlx4_priv(dev); 986 resource_size_t bf_start; 987 resource_size_t bf_len; 988 int err = 0; 989 990 bf_start = pci_resource_start(dev->pdev, 2) + 991 (dev->caps.num_uars << PAGE_SHIFT); 992 bf_len = pci_resource_len(dev->pdev, 2) - 993 (dev->caps.num_uars << PAGE_SHIFT); 994 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 995 if (!priv->bf_mapping) 996 err = -ENOMEM; 997 998 return err; 999 } 1000 1001 static void unmap_bf_area(struct mlx4_dev *dev) 1002 { 1003 if (mlx4_priv(dev)->bf_mapping) 1004 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1005 } 1006 1007 static void mlx4_close_hca(struct mlx4_dev *dev) 1008 { 1009 unmap_bf_area(dev); 1010 if (mlx4_is_slave(dev)) 1011 mlx4_slave_exit(dev); 1012 else { 1013 mlx4_CLOSE_HCA(dev, 0); 1014 mlx4_free_icms(dev); 1015 mlx4_UNMAP_FA(dev); 1016 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1017 } 1018 } 1019 1020 static int mlx4_init_slave(struct mlx4_dev *dev) 1021 { 1022 struct mlx4_priv *priv = mlx4_priv(dev); 1023 u64 dma = (u64) priv->mfunc.vhcr_dma; 1024 int num_of_reset_retries = NUM_OF_RESET_RETRIES; 1025 int ret_from_reset = 0; 1026 u32 slave_read; 1027 u32 cmd_channel_ver; 1028 1029 down(&priv->cmd.slave_sem); 1030 priv->cmd.max_cmds = 1; 1031 mlx4_warn(dev, "Sending reset\n"); 1032 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1033 MLX4_COMM_TIME); 1034 /* if we are in the middle of flr the slave will try 1035 * NUM_OF_RESET_RETRIES times before leaving.*/ 1036 if (ret_from_reset) { 1037 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1038 msleep(SLEEP_TIME_IN_RESET); 1039 while (ret_from_reset && num_of_reset_retries) { 1040 mlx4_warn(dev, "slave is currently in the" 1041 "middle of FLR. retrying..." 1042 "(try num:%d)\n", 1043 (NUM_OF_RESET_RETRIES - 1044 num_of_reset_retries + 1)); 1045 ret_from_reset = 1046 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 1047 0, MLX4_COMM_TIME); 1048 num_of_reset_retries = num_of_reset_retries - 1; 1049 } 1050 } else 1051 goto err; 1052 } 1053 1054 /* check the driver version - the slave I/F revision 1055 * must match the master's */ 1056 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1057 cmd_channel_ver = mlx4_comm_get_version(); 1058 1059 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1060 MLX4_COMM_GET_IF_REV(slave_read)) { 1061 mlx4_err(dev, "slave driver version is not supported" 1062 " by the master\n"); 1063 goto err; 1064 } 1065 1066 mlx4_warn(dev, "Sending vhcr0\n"); 1067 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1068 MLX4_COMM_TIME)) 1069 goto err; 1070 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1071 MLX4_COMM_TIME)) 1072 goto err; 1073 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1074 MLX4_COMM_TIME)) 1075 goto err; 1076 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME)) 1077 goto err; 1078 up(&priv->cmd.slave_sem); 1079 return 0; 1080 1081 err: 1082 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0); 1083 up(&priv->cmd.slave_sem); 1084 return -EIO; 1085 } 1086 1087 static int mlx4_init_hca(struct mlx4_dev *dev) 1088 { 1089 struct mlx4_priv *priv = mlx4_priv(dev); 1090 struct mlx4_adapter adapter; 1091 struct mlx4_dev_cap dev_cap; 1092 struct mlx4_mod_stat_cfg mlx4_cfg; 1093 struct mlx4_profile profile; 1094 struct mlx4_init_hca_param init_hca; 1095 u64 icm_size; 1096 int err; 1097 1098 if (!mlx4_is_slave(dev)) { 1099 err = mlx4_QUERY_FW(dev); 1100 if (err) { 1101 if (err == -EACCES) 1102 mlx4_info(dev, "non-primary physical function, skipping.\n"); 1103 else 1104 mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); 1105 goto unmap_bf; 1106 } 1107 1108 err = mlx4_load_fw(dev); 1109 if (err) { 1110 mlx4_err(dev, "Failed to start FW, aborting.\n"); 1111 goto unmap_bf; 1112 } 1113 1114 mlx4_cfg.log_pg_sz_m = 1; 1115 mlx4_cfg.log_pg_sz = 0; 1116 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 1117 if (err) 1118 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 1119 1120 err = mlx4_dev_cap(dev, &dev_cap); 1121 if (err) { 1122 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 1123 goto err_stop_fw; 1124 } 1125 1126 profile = default_profile; 1127 1128 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 1129 &init_hca); 1130 if ((long long) icm_size < 0) { 1131 err = icm_size; 1132 goto err_stop_fw; 1133 } 1134 1135 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 1136 init_hca.uar_page_sz = PAGE_SHIFT - 12; 1137 1138 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 1139 if (err) 1140 goto err_stop_fw; 1141 1142 err = mlx4_INIT_HCA(dev, &init_hca); 1143 if (err) { 1144 mlx4_err(dev, "INIT_HCA command failed, aborting.\n"); 1145 goto err_free_icm; 1146 } 1147 } else { 1148 err = mlx4_init_slave(dev); 1149 if (err) { 1150 mlx4_err(dev, "Failed to initialize slave\n"); 1151 goto unmap_bf; 1152 } 1153 1154 err = mlx4_slave_cap(dev); 1155 if (err) { 1156 mlx4_err(dev, "Failed to obtain slave caps\n"); 1157 goto err_close; 1158 } 1159 } 1160 1161 if (map_bf_area(dev)) 1162 mlx4_dbg(dev, "Failed to map blue flame area\n"); 1163 1164 /*Only the master set the ports, all the rest got it from it.*/ 1165 if (!mlx4_is_slave(dev)) 1166 mlx4_set_port_mask(dev); 1167 1168 err = mlx4_QUERY_ADAPTER(dev, &adapter); 1169 if (err) { 1170 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); 1171 goto err_close; 1172 } 1173 1174 priv->eq_table.inta_pin = adapter.inta_pin; 1175 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 1176 1177 return 0; 1178 1179 err_close: 1180 mlx4_close_hca(dev); 1181 1182 err_free_icm: 1183 if (!mlx4_is_slave(dev)) 1184 mlx4_free_icms(dev); 1185 1186 err_stop_fw: 1187 if (!mlx4_is_slave(dev)) { 1188 mlx4_UNMAP_FA(dev); 1189 mlx4_free_icm(dev, priv->fw.fw_icm, 0); 1190 } 1191 unmap_bf: 1192 unmap_bf_area(dev); 1193 return err; 1194 } 1195 1196 static int mlx4_init_counters_table(struct mlx4_dev *dev) 1197 { 1198 struct mlx4_priv *priv = mlx4_priv(dev); 1199 int nent; 1200 1201 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1202 return -ENOENT; 1203 1204 nent = dev->caps.max_counters; 1205 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0); 1206 } 1207 1208 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 1209 { 1210 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 1211 } 1212 1213 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 1214 { 1215 struct mlx4_priv *priv = mlx4_priv(dev); 1216 1217 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 1218 return -ENOENT; 1219 1220 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 1221 if (*idx == -1) 1222 return -ENOMEM; 1223 1224 return 0; 1225 } 1226 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 1227 1228 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 1229 { 1230 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx); 1231 return; 1232 } 1233 EXPORT_SYMBOL_GPL(mlx4_counter_free); 1234 1235 static int mlx4_setup_hca(struct mlx4_dev *dev) 1236 { 1237 struct mlx4_priv *priv = mlx4_priv(dev); 1238 int err; 1239 int port; 1240 __be32 ib_port_default_caps; 1241 1242 err = mlx4_init_uar_table(dev); 1243 if (err) { 1244 mlx4_err(dev, "Failed to initialize " 1245 "user access region table, aborting.\n"); 1246 return err; 1247 } 1248 1249 err = mlx4_uar_alloc(dev, &priv->driver_uar); 1250 if (err) { 1251 mlx4_err(dev, "Failed to allocate driver access region, " 1252 "aborting.\n"); 1253 goto err_uar_table_free; 1254 } 1255 1256 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 1257 if (!priv->kar) { 1258 mlx4_err(dev, "Couldn't map kernel access region, " 1259 "aborting.\n"); 1260 err = -ENOMEM; 1261 goto err_uar_free; 1262 } 1263 1264 err = mlx4_init_pd_table(dev); 1265 if (err) { 1266 mlx4_err(dev, "Failed to initialize " 1267 "protection domain table, aborting.\n"); 1268 goto err_kar_unmap; 1269 } 1270 1271 err = mlx4_init_xrcd_table(dev); 1272 if (err) { 1273 mlx4_err(dev, "Failed to initialize " 1274 "reliable connection domain table, aborting.\n"); 1275 goto err_pd_table_free; 1276 } 1277 1278 err = mlx4_init_mr_table(dev); 1279 if (err) { 1280 mlx4_err(dev, "Failed to initialize " 1281 "memory region table, aborting.\n"); 1282 goto err_xrcd_table_free; 1283 } 1284 1285 err = mlx4_init_eq_table(dev); 1286 if (err) { 1287 mlx4_err(dev, "Failed to initialize " 1288 "event queue table, aborting.\n"); 1289 goto err_mr_table_free; 1290 } 1291 1292 err = mlx4_cmd_use_events(dev); 1293 if (err) { 1294 mlx4_err(dev, "Failed to switch to event-driven " 1295 "firmware commands, aborting.\n"); 1296 goto err_eq_table_free; 1297 } 1298 1299 err = mlx4_NOP(dev); 1300 if (err) { 1301 if (dev->flags & MLX4_FLAG_MSI_X) { 1302 mlx4_warn(dev, "NOP command failed to generate MSI-X " 1303 "interrupt IRQ %d).\n", 1304 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1305 mlx4_warn(dev, "Trying again without MSI-X.\n"); 1306 } else { 1307 mlx4_err(dev, "NOP command failed to generate interrupt " 1308 "(IRQ %d), aborting.\n", 1309 priv->eq_table.eq[dev->caps.num_comp_vectors].irq); 1310 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 1311 } 1312 1313 goto err_cmd_poll; 1314 } 1315 1316 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 1317 1318 err = mlx4_init_cq_table(dev); 1319 if (err) { 1320 mlx4_err(dev, "Failed to initialize " 1321 "completion queue table, aborting.\n"); 1322 goto err_cmd_poll; 1323 } 1324 1325 err = mlx4_init_srq_table(dev); 1326 if (err) { 1327 mlx4_err(dev, "Failed to initialize " 1328 "shared receive queue table, aborting.\n"); 1329 goto err_cq_table_free; 1330 } 1331 1332 err = mlx4_init_qp_table(dev); 1333 if (err) { 1334 mlx4_err(dev, "Failed to initialize " 1335 "queue pair table, aborting.\n"); 1336 goto err_srq_table_free; 1337 } 1338 1339 if (!mlx4_is_slave(dev)) { 1340 err = mlx4_init_mcg_table(dev); 1341 if (err) { 1342 mlx4_err(dev, "Failed to initialize " 1343 "multicast group table, aborting.\n"); 1344 goto err_qp_table_free; 1345 } 1346 } 1347 1348 err = mlx4_init_counters_table(dev); 1349 if (err && err != -ENOENT) { 1350 mlx4_err(dev, "Failed to initialize counters table, aborting.\n"); 1351 goto err_mcg_table_free; 1352 } 1353 1354 if (!mlx4_is_slave(dev)) { 1355 for (port = 1; port <= dev->caps.num_ports; port++) { 1356 ib_port_default_caps = 0; 1357 err = mlx4_get_port_ib_caps(dev, port, 1358 &ib_port_default_caps); 1359 if (err) 1360 mlx4_warn(dev, "failed to get port %d default " 1361 "ib capabilities (%d). Continuing " 1362 "with caps = 0\n", port, err); 1363 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 1364 1365 err = mlx4_check_ext_port_caps(dev, port); 1366 if (err) 1367 mlx4_warn(dev, "failed to get port %d extended " 1368 "port capabilities support info (%d)." 1369 " Assuming not supported\n", 1370 port, err); 1371 1372 err = mlx4_SET_PORT(dev, port); 1373 if (err) { 1374 mlx4_err(dev, "Failed to set port %d, aborting\n", 1375 port); 1376 goto err_counters_table_free; 1377 } 1378 } 1379 } 1380 1381 return 0; 1382 1383 err_counters_table_free: 1384 mlx4_cleanup_counters_table(dev); 1385 1386 err_mcg_table_free: 1387 mlx4_cleanup_mcg_table(dev); 1388 1389 err_qp_table_free: 1390 mlx4_cleanup_qp_table(dev); 1391 1392 err_srq_table_free: 1393 mlx4_cleanup_srq_table(dev); 1394 1395 err_cq_table_free: 1396 mlx4_cleanup_cq_table(dev); 1397 1398 err_cmd_poll: 1399 mlx4_cmd_use_polling(dev); 1400 1401 err_eq_table_free: 1402 mlx4_cleanup_eq_table(dev); 1403 1404 err_mr_table_free: 1405 mlx4_cleanup_mr_table(dev); 1406 1407 err_xrcd_table_free: 1408 mlx4_cleanup_xrcd_table(dev); 1409 1410 err_pd_table_free: 1411 mlx4_cleanup_pd_table(dev); 1412 1413 err_kar_unmap: 1414 iounmap(priv->kar); 1415 1416 err_uar_free: 1417 mlx4_uar_free(dev, &priv->driver_uar); 1418 1419 err_uar_table_free: 1420 mlx4_cleanup_uar_table(dev); 1421 return err; 1422 } 1423 1424 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 1425 { 1426 struct mlx4_priv *priv = mlx4_priv(dev); 1427 struct msix_entry *entries; 1428 int nreq = min_t(int, dev->caps.num_ports * 1429 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) 1430 + MSIX_LEGACY_SZ, MAX_MSIX); 1431 int err; 1432 int i; 1433 1434 if (msi_x) { 1435 /* In multifunction mode each function gets 2 msi-X vectors 1436 * one for data path completions anf the other for asynch events 1437 * or command completions */ 1438 if (mlx4_is_mfunc(dev)) { 1439 nreq = 2; 1440 } else { 1441 nreq = min_t(int, dev->caps.num_eqs - 1442 dev->caps.reserved_eqs, nreq); 1443 } 1444 1445 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 1446 if (!entries) 1447 goto no_msi; 1448 1449 for (i = 0; i < nreq; ++i) 1450 entries[i].entry = i; 1451 1452 retry: 1453 err = pci_enable_msix(dev->pdev, entries, nreq); 1454 if (err) { 1455 /* Try again if at least 2 vectors are available */ 1456 if (err > 1) { 1457 mlx4_info(dev, "Requested %d vectors, " 1458 "but only %d MSI-X vectors available, " 1459 "trying again\n", nreq, err); 1460 nreq = err; 1461 goto retry; 1462 } 1463 kfree(entries); 1464 goto no_msi; 1465 } 1466 1467 if (nreq < 1468 MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) { 1469 /*Working in legacy mode , all EQ's shared*/ 1470 dev->caps.comp_pool = 0; 1471 dev->caps.num_comp_vectors = nreq - 1; 1472 } else { 1473 dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ; 1474 dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1; 1475 } 1476 for (i = 0; i < nreq; ++i) 1477 priv->eq_table.eq[i].irq = entries[i].vector; 1478 1479 dev->flags |= MLX4_FLAG_MSI_X; 1480 1481 kfree(entries); 1482 return; 1483 } 1484 1485 no_msi: 1486 dev->caps.num_comp_vectors = 1; 1487 dev->caps.comp_pool = 0; 1488 1489 for (i = 0; i < 2; ++i) 1490 priv->eq_table.eq[i].irq = dev->pdev->irq; 1491 } 1492 1493 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 1494 { 1495 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 1496 int err = 0; 1497 1498 info->dev = dev; 1499 info->port = port; 1500 if (!mlx4_is_slave(dev)) { 1501 INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL); 1502 mlx4_init_mac_table(dev, &info->mac_table); 1503 mlx4_init_vlan_table(dev, &info->vlan_table); 1504 info->base_qpn = 1505 dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] + 1506 (port - 1) * (1 << log_num_mac); 1507 } 1508 1509 sprintf(info->dev_name, "mlx4_port%d", port); 1510 info->port_attr.attr.name = info->dev_name; 1511 if (mlx4_is_mfunc(dev)) 1512 info->port_attr.attr.mode = S_IRUGO; 1513 else { 1514 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 1515 info->port_attr.store = set_port_type; 1516 } 1517 info->port_attr.show = show_port_type; 1518 sysfs_attr_init(&info->port_attr.attr); 1519 1520 err = device_create_file(&dev->pdev->dev, &info->port_attr); 1521 if (err) { 1522 mlx4_err(dev, "Failed to create file for port %d\n", port); 1523 info->port = -1; 1524 } 1525 1526 return err; 1527 } 1528 1529 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 1530 { 1531 if (info->port < 0) 1532 return; 1533 1534 device_remove_file(&info->dev->pdev->dev, &info->port_attr); 1535 } 1536 1537 static int mlx4_init_steering(struct mlx4_dev *dev) 1538 { 1539 struct mlx4_priv *priv = mlx4_priv(dev); 1540 int num_entries = dev->caps.num_ports; 1541 int i, j; 1542 1543 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 1544 if (!priv->steer) 1545 return -ENOMEM; 1546 1547 for (i = 0; i < num_entries; i++) { 1548 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1549 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 1550 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 1551 } 1552 INIT_LIST_HEAD(&priv->steer[i].high_prios); 1553 } 1554 return 0; 1555 } 1556 1557 static void mlx4_clear_steering(struct mlx4_dev *dev) 1558 { 1559 struct mlx4_priv *priv = mlx4_priv(dev); 1560 struct mlx4_steer_index *entry, *tmp_entry; 1561 struct mlx4_promisc_qp *pqp, *tmp_pqp; 1562 int num_entries = dev->caps.num_ports; 1563 int i, j; 1564 1565 for (i = 0; i < num_entries; i++) { 1566 for (j = 0; j < MLX4_NUM_STEERS; j++) { 1567 list_for_each_entry_safe(pqp, tmp_pqp, 1568 &priv->steer[i].promisc_qps[j], 1569 list) { 1570 list_del(&pqp->list); 1571 kfree(pqp); 1572 } 1573 list_for_each_entry_safe(entry, tmp_entry, 1574 &priv->steer[i].steer_entries[j], 1575 list) { 1576 list_del(&entry->list); 1577 list_for_each_entry_safe(pqp, tmp_pqp, 1578 &entry->duplicates, 1579 list) { 1580 list_del(&pqp->list); 1581 kfree(pqp); 1582 } 1583 kfree(entry); 1584 } 1585 } 1586 } 1587 kfree(priv->steer); 1588 } 1589 1590 static int extended_func_num(struct pci_dev *pdev) 1591 { 1592 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 1593 } 1594 1595 #define MLX4_OWNER_BASE 0x8069c 1596 #define MLX4_OWNER_SIZE 4 1597 1598 static int mlx4_get_ownership(struct mlx4_dev *dev) 1599 { 1600 void __iomem *owner; 1601 u32 ret; 1602 1603 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1604 MLX4_OWNER_SIZE); 1605 if (!owner) { 1606 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1607 return -ENOMEM; 1608 } 1609 1610 ret = readl(owner); 1611 iounmap(owner); 1612 return (int) !!ret; 1613 } 1614 1615 static void mlx4_free_ownership(struct mlx4_dev *dev) 1616 { 1617 void __iomem *owner; 1618 1619 owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE, 1620 MLX4_OWNER_SIZE); 1621 if (!owner) { 1622 mlx4_err(dev, "Failed to obtain ownership bit\n"); 1623 return; 1624 } 1625 writel(0, owner); 1626 msleep(1000); 1627 iounmap(owner); 1628 } 1629 1630 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 1631 { 1632 struct mlx4_priv *priv; 1633 struct mlx4_dev *dev; 1634 int err; 1635 int port; 1636 1637 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 1638 1639 err = pci_enable_device(pdev); 1640 if (err) { 1641 dev_err(&pdev->dev, "Cannot enable PCI device, " 1642 "aborting.\n"); 1643 return err; 1644 } 1645 if (num_vfs > MLX4_MAX_NUM_VF) { 1646 printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n", 1647 num_vfs, MLX4_MAX_NUM_VF); 1648 return -EINVAL; 1649 } 1650 /* 1651 * Check for BARs. 1652 */ 1653 if (((id == NULL) || !(id->driver_data & MLX4_VF)) && 1654 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1655 dev_err(&pdev->dev, "Missing DCS, aborting." 1656 "(id == 0X%p, id->driver_data: 0x%lx," 1657 " pci_resource_flags(pdev, 0):0x%lx)\n", id, 1658 id ? id->driver_data : 0, pci_resource_flags(pdev, 0)); 1659 err = -ENODEV; 1660 goto err_disable_pdev; 1661 } 1662 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 1663 dev_err(&pdev->dev, "Missing UAR, aborting.\n"); 1664 err = -ENODEV; 1665 goto err_disable_pdev; 1666 } 1667 1668 err = pci_request_regions(pdev, DRV_NAME); 1669 if (err) { 1670 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 1671 goto err_disable_pdev; 1672 } 1673 1674 pci_set_master(pdev); 1675 1676 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1677 if (err) { 1678 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n"); 1679 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1680 if (err) { 1681 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n"); 1682 goto err_release_regions; 1683 } 1684 } 1685 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1686 if (err) { 1687 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit " 1688 "consistent PCI DMA mask.\n"); 1689 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1690 if (err) { 1691 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, " 1692 "aborting.\n"); 1693 goto err_release_regions; 1694 } 1695 } 1696 1697 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 1698 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 1699 1700 priv = kzalloc(sizeof *priv, GFP_KERNEL); 1701 if (!priv) { 1702 dev_err(&pdev->dev, "Device struct alloc failed, " 1703 "aborting.\n"); 1704 err = -ENOMEM; 1705 goto err_release_regions; 1706 } 1707 1708 dev = &priv->dev; 1709 dev->pdev = pdev; 1710 INIT_LIST_HEAD(&priv->ctx_list); 1711 spin_lock_init(&priv->ctx_lock); 1712 1713 mutex_init(&priv->port_mutex); 1714 1715 INIT_LIST_HEAD(&priv->pgdir_list); 1716 mutex_init(&priv->pgdir_mutex); 1717 1718 INIT_LIST_HEAD(&priv->bf_list); 1719 mutex_init(&priv->bf_mutex); 1720 1721 dev->rev_id = pdev->revision; 1722 /* Detect if this device is a virtual function */ 1723 if (id && id->driver_data & MLX4_VF) { 1724 /* When acting as pf, we normally skip vfs unless explicitly 1725 * requested to probe them. */ 1726 if (num_vfs && extended_func_num(pdev) > probe_vf) { 1727 mlx4_warn(dev, "Skipping virtual function:%d\n", 1728 extended_func_num(pdev)); 1729 err = -ENODEV; 1730 goto err_free_dev; 1731 } 1732 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 1733 dev->flags |= MLX4_FLAG_SLAVE; 1734 } else { 1735 /* We reset the device and enable SRIOV only for physical 1736 * devices. Try to claim ownership on the device; 1737 * if already taken, skip -- do not allow multiple PFs */ 1738 err = mlx4_get_ownership(dev); 1739 if (err) { 1740 if (err < 0) 1741 goto err_free_dev; 1742 else { 1743 mlx4_warn(dev, "Multiple PFs not yet supported." 1744 " Skipping PF.\n"); 1745 err = -EINVAL; 1746 goto err_free_dev; 1747 } 1748 } 1749 1750 if (num_vfs) { 1751 mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs); 1752 err = pci_enable_sriov(pdev, num_vfs); 1753 if (err) { 1754 mlx4_err(dev, "Failed to enable sriov," 1755 "continuing without sriov enabled" 1756 " (err = %d).\n", err); 1757 num_vfs = 0; 1758 err = 0; 1759 } else { 1760 mlx4_warn(dev, "Running in master mode\n"); 1761 dev->flags |= MLX4_FLAG_SRIOV | 1762 MLX4_FLAG_MASTER; 1763 dev->num_vfs = num_vfs; 1764 } 1765 } 1766 1767 /* 1768 * Now reset the HCA before we touch the PCI capabilities or 1769 * attempt a firmware command, since a boot ROM may have left 1770 * the HCA in an undefined state. 1771 */ 1772 err = mlx4_reset(dev); 1773 if (err) { 1774 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 1775 goto err_rel_own; 1776 } 1777 } 1778 1779 slave_start: 1780 if (mlx4_cmd_init(dev)) { 1781 mlx4_err(dev, "Failed to init command interface, aborting.\n"); 1782 goto err_sriov; 1783 } 1784 1785 /* In slave functions, the communication channel must be initialized 1786 * before posting commands. Also, init num_slaves before calling 1787 * mlx4_init_hca */ 1788 if (mlx4_is_mfunc(dev)) { 1789 if (mlx4_is_master(dev)) 1790 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 1791 else { 1792 dev->num_slaves = 0; 1793 if (mlx4_multi_func_init(dev)) { 1794 mlx4_err(dev, "Failed to init slave mfunc" 1795 " interface, aborting.\n"); 1796 goto err_cmd; 1797 } 1798 } 1799 } 1800 1801 err = mlx4_init_hca(dev); 1802 if (err) { 1803 if (err == -EACCES) { 1804 /* Not primary Physical function 1805 * Running in slave mode */ 1806 mlx4_cmd_cleanup(dev); 1807 dev->flags |= MLX4_FLAG_SLAVE; 1808 dev->flags &= ~MLX4_FLAG_MASTER; 1809 goto slave_start; 1810 } else 1811 goto err_mfunc; 1812 } 1813 1814 /* In master functions, the communication channel must be initialized 1815 * after obtaining its address from fw */ 1816 if (mlx4_is_master(dev)) { 1817 if (mlx4_multi_func_init(dev)) { 1818 mlx4_err(dev, "Failed to init master mfunc" 1819 "interface, aborting.\n"); 1820 goto err_close; 1821 } 1822 } 1823 1824 err = mlx4_alloc_eq_table(dev); 1825 if (err) 1826 goto err_master_mfunc; 1827 1828 priv->msix_ctl.pool_bm = 0; 1829 spin_lock_init(&priv->msix_ctl.pool_lock); 1830 1831 mlx4_enable_msi_x(dev); 1832 if ((mlx4_is_mfunc(dev)) && 1833 !(dev->flags & MLX4_FLAG_MSI_X)) { 1834 mlx4_err(dev, "INTx is not supported in multi-function mode." 1835 " aborting.\n"); 1836 goto err_free_eq; 1837 } 1838 1839 if (!mlx4_is_slave(dev)) { 1840 err = mlx4_init_steering(dev); 1841 if (err) 1842 goto err_free_eq; 1843 } 1844 1845 err = mlx4_setup_hca(dev); 1846 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 1847 !mlx4_is_mfunc(dev)) { 1848 dev->flags &= ~MLX4_FLAG_MSI_X; 1849 pci_disable_msix(pdev); 1850 err = mlx4_setup_hca(dev); 1851 } 1852 1853 if (err) 1854 goto err_steer; 1855 1856 for (port = 1; port <= dev->caps.num_ports; port++) { 1857 err = mlx4_init_port_info(dev, port); 1858 if (err) 1859 goto err_port; 1860 } 1861 1862 err = mlx4_register_device(dev); 1863 if (err) 1864 goto err_port; 1865 1866 mlx4_sense_init(dev); 1867 mlx4_start_sense(dev); 1868 1869 pci_set_drvdata(pdev, dev); 1870 1871 return 0; 1872 1873 err_port: 1874 for (--port; port >= 1; --port) 1875 mlx4_cleanup_port_info(&priv->port[port]); 1876 1877 mlx4_cleanup_counters_table(dev); 1878 mlx4_cleanup_mcg_table(dev); 1879 mlx4_cleanup_qp_table(dev); 1880 mlx4_cleanup_srq_table(dev); 1881 mlx4_cleanup_cq_table(dev); 1882 mlx4_cmd_use_polling(dev); 1883 mlx4_cleanup_eq_table(dev); 1884 mlx4_cleanup_mr_table(dev); 1885 mlx4_cleanup_xrcd_table(dev); 1886 mlx4_cleanup_pd_table(dev); 1887 mlx4_cleanup_uar_table(dev); 1888 1889 err_steer: 1890 if (!mlx4_is_slave(dev)) 1891 mlx4_clear_steering(dev); 1892 1893 err_free_eq: 1894 mlx4_free_eq_table(dev); 1895 1896 err_master_mfunc: 1897 if (mlx4_is_master(dev)) 1898 mlx4_multi_func_cleanup(dev); 1899 1900 err_close: 1901 if (dev->flags & MLX4_FLAG_MSI_X) 1902 pci_disable_msix(pdev); 1903 1904 mlx4_close_hca(dev); 1905 1906 err_mfunc: 1907 if (mlx4_is_slave(dev)) 1908 mlx4_multi_func_cleanup(dev); 1909 1910 err_cmd: 1911 mlx4_cmd_cleanup(dev); 1912 1913 err_sriov: 1914 if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) 1915 pci_disable_sriov(pdev); 1916 1917 err_rel_own: 1918 if (!mlx4_is_slave(dev)) 1919 mlx4_free_ownership(dev); 1920 1921 err_free_dev: 1922 kfree(priv); 1923 1924 err_release_regions: 1925 pci_release_regions(pdev); 1926 1927 err_disable_pdev: 1928 pci_disable_device(pdev); 1929 pci_set_drvdata(pdev, NULL); 1930 return err; 1931 } 1932 1933 static int __devinit mlx4_init_one(struct pci_dev *pdev, 1934 const struct pci_device_id *id) 1935 { 1936 printk_once(KERN_INFO "%s", mlx4_version); 1937 1938 return __mlx4_init_one(pdev, id); 1939 } 1940 1941 static void mlx4_remove_one(struct pci_dev *pdev) 1942 { 1943 struct mlx4_dev *dev = pci_get_drvdata(pdev); 1944 struct mlx4_priv *priv = mlx4_priv(dev); 1945 int p; 1946 1947 if (dev) { 1948 /* in SRIOV it is not allowed to unload the pf's 1949 * driver while there are alive vf's */ 1950 if (mlx4_is_master(dev)) { 1951 if (mlx4_how_many_lives_vf(dev)) 1952 printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n"); 1953 } 1954 mlx4_stop_sense(dev); 1955 mlx4_unregister_device(dev); 1956 1957 for (p = 1; p <= dev->caps.num_ports; p++) { 1958 mlx4_cleanup_port_info(&priv->port[p]); 1959 mlx4_CLOSE_PORT(dev, p); 1960 } 1961 1962 mlx4_cleanup_counters_table(dev); 1963 mlx4_cleanup_mcg_table(dev); 1964 mlx4_cleanup_qp_table(dev); 1965 mlx4_cleanup_srq_table(dev); 1966 mlx4_cleanup_cq_table(dev); 1967 mlx4_cmd_use_polling(dev); 1968 mlx4_cleanup_eq_table(dev); 1969 mlx4_cleanup_mr_table(dev); 1970 mlx4_cleanup_xrcd_table(dev); 1971 mlx4_cleanup_pd_table(dev); 1972 1973 if (mlx4_is_master(dev)) 1974 mlx4_free_resource_tracker(dev); 1975 1976 iounmap(priv->kar); 1977 mlx4_uar_free(dev, &priv->driver_uar); 1978 mlx4_cleanup_uar_table(dev); 1979 if (!mlx4_is_slave(dev)) 1980 mlx4_clear_steering(dev); 1981 mlx4_free_eq_table(dev); 1982 if (mlx4_is_master(dev)) 1983 mlx4_multi_func_cleanup(dev); 1984 mlx4_close_hca(dev); 1985 if (mlx4_is_slave(dev)) 1986 mlx4_multi_func_cleanup(dev); 1987 mlx4_cmd_cleanup(dev); 1988 1989 if (dev->flags & MLX4_FLAG_MSI_X) 1990 pci_disable_msix(pdev); 1991 if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) { 1992 mlx4_warn(dev, "Disabling sriov\n"); 1993 pci_disable_sriov(pdev); 1994 } 1995 1996 if (!mlx4_is_slave(dev)) 1997 mlx4_free_ownership(dev); 1998 kfree(priv); 1999 pci_release_regions(pdev); 2000 pci_disable_device(pdev); 2001 pci_set_drvdata(pdev, NULL); 2002 } 2003 } 2004 2005 int mlx4_restart_one(struct pci_dev *pdev) 2006 { 2007 mlx4_remove_one(pdev); 2008 return __mlx4_init_one(pdev, NULL); 2009 } 2010 2011 static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = { 2012 /* MT25408 "Hermon" SDR */ 2013 { PCI_VDEVICE(MELLANOX, 0x6340), 0 }, 2014 /* MT25408 "Hermon" DDR */ 2015 { PCI_VDEVICE(MELLANOX, 0x634a), 0 }, 2016 /* MT25408 "Hermon" QDR */ 2017 { PCI_VDEVICE(MELLANOX, 0x6354), 0 }, 2018 /* MT25408 "Hermon" DDR PCIe gen2 */ 2019 { PCI_VDEVICE(MELLANOX, 0x6732), 0 }, 2020 /* MT25408 "Hermon" QDR PCIe gen2 */ 2021 { PCI_VDEVICE(MELLANOX, 0x673c), 0 }, 2022 /* MT25408 "Hermon" EN 10GigE */ 2023 { PCI_VDEVICE(MELLANOX, 0x6368), 0 }, 2024 /* MT25408 "Hermon" EN 10GigE PCIe gen2 */ 2025 { PCI_VDEVICE(MELLANOX, 0x6750), 0 }, 2026 /* MT25458 ConnectX EN 10GBASE-T 10GigE */ 2027 { PCI_VDEVICE(MELLANOX, 0x6372), 0 }, 2028 /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */ 2029 { PCI_VDEVICE(MELLANOX, 0x675a), 0 }, 2030 /* MT26468 ConnectX EN 10GigE PCIe gen2*/ 2031 { PCI_VDEVICE(MELLANOX, 0x6764), 0 }, 2032 /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */ 2033 { PCI_VDEVICE(MELLANOX, 0x6746), 0 }, 2034 /* MT26478 ConnectX2 40GigE PCIe gen2 */ 2035 { PCI_VDEVICE(MELLANOX, 0x676e), 0 }, 2036 /* MT25400 Family [ConnectX-2 Virtual Function] */ 2037 { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF }, 2038 /* MT27500 Family [ConnectX-3] */ 2039 { PCI_VDEVICE(MELLANOX, 0x1003), 0 }, 2040 /* MT27500 Family [ConnectX-3 Virtual Function] */ 2041 { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF }, 2042 { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */ 2043 { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */ 2044 { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */ 2045 { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */ 2046 { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */ 2047 { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */ 2048 { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */ 2049 { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */ 2050 { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */ 2051 { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */ 2052 { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */ 2053 { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */ 2054 { 0, } 2055 }; 2056 2057 MODULE_DEVICE_TABLE(pci, mlx4_pci_table); 2058 2059 static struct pci_driver mlx4_driver = { 2060 .name = DRV_NAME, 2061 .id_table = mlx4_pci_table, 2062 .probe = mlx4_init_one, 2063 .remove = __devexit_p(mlx4_remove_one) 2064 }; 2065 2066 static int __init mlx4_verify_params(void) 2067 { 2068 if ((log_num_mac < 0) || (log_num_mac > 7)) { 2069 pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac); 2070 return -1; 2071 } 2072 2073 if (log_num_vlan != 0) 2074 pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n", 2075 MLX4_LOG_NUM_VLANS); 2076 2077 if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) { 2078 pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg); 2079 return -1; 2080 } 2081 2082 /* Check if module param for ports type has legal combination */ 2083 if (port_type_array[0] == false && port_type_array[1] == true) { 2084 printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n"); 2085 port_type_array[0] = true; 2086 } 2087 2088 return 0; 2089 } 2090 2091 static int __init mlx4_init(void) 2092 { 2093 int ret; 2094 2095 if (mlx4_verify_params()) 2096 return -EINVAL; 2097 2098 mlx4_catas_init(); 2099 2100 mlx4_wq = create_singlethread_workqueue("mlx4"); 2101 if (!mlx4_wq) 2102 return -ENOMEM; 2103 2104 ret = pci_register_driver(&mlx4_driver); 2105 return ret < 0 ? ret : 0; 2106 } 2107 2108 static void __exit mlx4_cleanup(void) 2109 { 2110 pci_unregister_driver(&mlx4_driver); 2111 destroy_workqueue(mlx4_wq); 2112 } 2113 2114 module_init(mlx4_init); 2115 module_exit(mlx4_cleanup); 2116