/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
#include <linux/kmod.h>

#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>

#include "mlx4.h"
#include "fw.h"
#include "icm.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct workqueue_struct *mlx4_wq;

#ifdef CONFIG_MLX4_DEBUG

int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static uint8_t num_vfs[3] = {0, 0, 0};
static int num_vfs_argc;
module_param_array(num_vfs, byte, &num_vfs_argc, 0444);
MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n"
			  "num_vfs=port1,port2,port1+2");

static uint8_t probe_vf[3] = {0, 0, 0};
static int probe_vfs_argc;
module_param_array(probe_vf, byte, &probe_vfs_argc, 0444);
MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)\n"
			   "probe_vf=port1,port2,port1+2");

int mlx4_log_num_mgm_entry_size = MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
module_param_named(log_num_mgm_entry_size,
		   mlx4_log_num_mgm_entry_size, int, 0444);
MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num"
					 " of qp per mcg, for example:"
					 " 10 gives 248. Range: 7 <="
					 " log_num_mgm_entry_size <= 12."
					 " To activate device managed"
					 " flow steering when available, set to -1");

static bool enable_64b_cqe_eqe = true;
module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
		 "Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");

#define PF_CONTEXT_BEHAVIOUR_MASK	(MLX4_FUNC_CAP_64B_EQE_CQE | \
					 MLX4_FUNC_CAP_EQE_CQE_STRIDE | \
					 MLX4_FUNC_CAP_DMFS_A0_STATIC)

#define RESET_PERSIST_MASK_FLAGS	(MLX4_FLAG_SRIOV)

static char mlx4_version[] =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 18,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 19,
	.num_mtt	= 1 << 20, /* It is really num mtt segments */
};

static struct mlx4_profile low_mem_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 6,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 8,
	.num_mcg	= 1 << 8,
	.num_mpt	= 1 << 9,
	.num_mtt	= 1 << 7,
};

static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
#define MLX4_MIN_LOG_NUM_VLANS 0
#define MLX4_MIN_LOG_NUM_MAC 1

static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");

int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");

static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
static int arr_argc = 2;
module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default, "
				  "1 for IB, 2 for Ethernet");

struct mlx4_port_config {
	struct list_head list;
	enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
	struct pci_dev *pdev;
};

static atomic_t pf_loading = ATOMIC_INIT(0);

static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
					      struct mlx4_dev_cap *dev_cap)
{
	/* The number of reserved UARs is calculated in units of the system
	 * page size.  Therefore, an adjustment is applied when the UAR page
	 * size is smaller than the system page size.
	 */
	dev->caps.reserved_uars =
		max_t(int,
		      mlx4_get_num_reserved_uar(dev),
		      dev_cap->reserved_uars /
			(1 << (PAGE_SHIFT - dev->uar_page_shift)));
}

int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 0; i < dev->caps.num_ports - 1; i++) {
			if (port_type[i] != port_type[i + 1]) {
				mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
				return -EINVAL;
			}
		}
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i+1])) {
			mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
				 i + 1);
			return -EINVAL;
		}
	}
	return 0;
}

static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int i;

	for (i = 1; i <= dev->caps.num_ports; ++i)
		dev->caps.port_mask[i] = dev->caps.port_type[i];
}

enum {
	MLX4_QUERY_FUNC_NUM_SYS_EQS = 1 << 0,
};

static int mlx4_query_func(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err = 0;
	struct mlx4_func func;

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) {
		err = mlx4_QUERY_FUNC(dev, &func, 0);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
			return err;
		}
		dev_cap->max_eqs = func.max_eq;
		dev_cap->reserved_eqs = func.rsvd_eqs;
		dev_cap->reserved_uars = func.rsvd_uars;
		err |= MLX4_QUERY_FUNC_NUM_SYS_EQS;
	}
	return err;
}

static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev)
{
	struct mlx4_caps *dev_cap = &dev->caps;

	/* FW not supporting or cancelled by user */
	if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_EQE_STRIDE) ||
	    !(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_CQE_STRIDE))
		return;

	/* Must have 64B CQE_EQE enabled by FW to use bigger stride
	 * When FW has NCSI it may decide not to report 64B CQE/EQEs
	 */
	if (!(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_EQE) ||
	    !(dev_cap->flags & MLX4_DEV_CAP_FLAG_64B_CQE)) {
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		return;
	}

	if (cache_line_size() == 128 || cache_line_size() == 256) {
		mlx4_dbg(dev, "Enabling CQE stride cacheLine supported\n");
		/* Changing the real data inside CQE size to 32B */
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
		dev_cap->flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;

		if (mlx4_is_master(dev))
			dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE;
	} else {
		if (cache_line_size() != 32 && cache_line_size() != 64)
			mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n");
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
		dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
	}
}

static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
			  struct mlx4_port_cap *port_cap)
{
	dev->caps.vl_cap[port] = port_cap->max_vl;
	dev->caps.ib_mtu_cap[port] = port_cap->ib_mtu;
	dev->phys_caps.gid_phys_table_len[port] = port_cap->max_gids;
	dev->phys_caps.pkey_phys_table_len[port] = port_cap->max_pkeys;
	/* set gid and pkey table operating lengths by default
	 * to non-sriov values
	 */
	dev->caps.gid_table_len[port] = port_cap->max_gids;
	dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
	dev->caps.port_width_cap[port] = port_cap->max_port_width;
	dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
	dev->caps.def_mac[port] = port_cap->def_mac;
	dev->caps.supported_type[port] = port_cap->supported_port_types;
	dev->caps.suggested_type[port] = port_cap->suggested_type;
	dev->caps.default_sense[port] = port_cap->default_sense;
	dev->caps.trans_type[port] = port_cap->trans_type;
	dev->caps.vendor_oui[port] = port_cap->vendor_oui;
	dev->caps.wavelength[port] = port_cap->wavelength;
	dev->caps.trans_code[port] = port_cap->trans_code;

	return 0;
}

static int mlx4_dev_port(struct mlx4_dev *dev, int port,
			 struct mlx4_port_cap *port_cap)
{
	int err = 0;

	err = mlx4_QUERY_PORT(dev, port, port_cap);

	if (err)
		mlx4_err(dev, "QUERY_PORT command failed.\n");

	return err;
}

static inline void mlx4_enable_ignore_fcs(struct mlx4_dev *dev)
{
	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_IGNORE_FCS))
		return;

	if (mlx4_is_mfunc(dev)) {
		mlx4_dbg(dev, "SRIOV mode - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)) {
		mlx4_dbg(dev,
			 "Keep FCS is not supported - Disabling Ignore FCS");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_IGNORE_FCS;
		return;
	}
}

#define MLX4_A0_STEERING_TABLE_SIZE	256
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}
	mlx4_dev_cap_dump(dev, dev_cap);

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->persist->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev_cap->uar_size,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports = dev_cap->num_ports;
	dev->caps.num_sys_eqs = dev_cap->num_sys_eqs;
	dev->phys_caps.num_phys_eqs = dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS ?
				      dev->caps.num_sys_eqs :
				      MLX4_MAX_EQ_NUM;
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = _mlx4_dev_port(dev, i, dev_cap->port_cap + i);
		if (err) {
			mlx4_err(dev, "QUERY_PORT command failed, aborting\n");
			return err;
		}
	}

	dev->caps.uar_page_size = PAGE_SIZE;
	dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg = dev_cap->max_rq_sg;
	dev->caps.max_wqes = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs = dev_cap->reserved_eqs;
	dev->caps.reserved_mtts = dev_cap->reserved_mtts;
	dev->caps.reserved_mrws = dev_cap->reserved_mrws;

	dev->caps.reserved_pds = dev_cap->reserved_pds;
	dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->reserved_xrcds : 0;
	dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
					dev_cap->max_xrcds : 0;
	dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;

	dev->caps.max_msg_sz = dev_cap->max_msg_sz;
	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags = dev_cap->flags;
	dev->caps.flags2 = dev_cap->flags2;
	dev->caps.bmme_flags = dev_cap->bmme_flags;
	dev->caps.reserved_lkey = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
	dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;

	/* Save uar page shift */
	if (!mlx4_is_slave(dev)) {
		/* Virtual PCI function needs to determine UAR page size from
		 * firmware. Only master PCI function can set the uar page size
		 */
		dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
		mlx4_set_num_reserved_uars(dev, dev_cap);
	}

	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
		struct mlx4_init_hca_param hca_param;

		memset(&hca_param, 0, sizeof(hca_param));
		err = mlx4_QUERY_HCA(dev, &hca_param);
		/* Turn off PHV_EN flag in case phv_check_en is set.
		 * phv_check_en is a HW check that parses the packet and verifies
		 * that the phv bit was reported correctly in the wqe. To allow QinQ,
		 * the PHV_EN flag should be set and phv_check_en must be cleared,
		 * otherwise QinQ packets will be dropped by the HW.
		 */
		if (err || hca_param.phv_check_en)
			dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_PHV_EN;
	}

	/* Sense port always allowed on supported devices for ConnectX-1 and -2 */
	if (mlx4_priv(dev)->pci_dev_data & MLX4_PCI_DEV_FORCE_SENSE_PORT)
		dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
	/* Don't do sense port on multifunction devices (for now at least) */
	if (mlx4_is_mfunc(dev))
		dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;

	if (mlx4_low_memory_profile()) {
		dev->caps.log_num_macs = MLX4_MIN_LOG_NUM_MAC;
		dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
	} else {
		dev->caps.log_num_macs = log_num_mac;
		dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
		if (dev->caps.supported_type[i]) {
			/* if only ETH is supported - assign ETH */
			if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
			/* if only IB is supported, assign IB */
			else if (dev->caps.supported_type[i] ==
				 MLX4_PORT_TYPE_IB)
				dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
			else {
				/* if IB and ETH are supported, we set the port
				 * type according to user selection of port type;
				 * if user selected none, take the FW hint */
				if (port_type_array[i - 1] == MLX4_PORT_TYPE_NONE)
					dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
						MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
				else
					dev->caps.port_type[i] = port_type_array[i - 1];
			}
		}
		/*
		 * Link sensing is allowed on the port if 3 conditions are true:
		 * 1. Both protocols are supported on the port.
		 * 2. Different types are supported on the port
		 * 3. FW declared that it supports link sensing
		 */
		mlx4_priv(dev)->sense.sense_allowed[i] =
			((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
			 (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));

		/*
		 * If "default_sense" bit is set, we move the port to "AUTO" mode
		 * and perform sense_port FW command to try and set the correct
		 * port type from beginning
		 */
		if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
			enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
			dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
			mlx4_SENSE_PORT(dev, i, &sensed_port);
			if (sensed_port != MLX4_PORT_TYPE_NONE)
				dev->caps.port_type[i] = sensed_port;
		} else {
			dev->caps.possible_type[i] = dev->caps.port_type[i];
		}

		if (dev->caps.log_num_macs > dev_cap->port_cap[i].log_max_macs) {
			dev->caps.log_num_macs = dev_cap->port_cap[i].log_max_macs;
			mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->port_cap[i].log_max_vlans) {
			dev->caps.log_num_vlans = dev_cap->port_cap[i].log_max_vlans;
			mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	if (mlx4_is_master(dev) && (dev->caps.num_ports == 2) &&
	    (port_type_array[0] == MLX4_PORT_TYPE_IB) &&
	    (port_type_array[1] == MLX4_PORT_TYPE_ETH)) {
		mlx4_warn(dev,
			  "Granular QoS per VF not supported with IB/Eth configuration\n");
		dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_QOS_VPP;
	}

	dev->caps.max_counters = dev_cap->max_counters;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	if (dev_cap->dmfs_high_rate_qpn_base > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN)
		dev->caps.dmfs_high_rate_qpn_base = dev_cap->dmfs_high_rate_qpn_base;
	else
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];

	if (dev_cap->dmfs_high_rate_qpn_range > 0 &&
	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) {
		dev->caps.dmfs_high_rate_qpn_range = dev_cap->dmfs_high_rate_qpn_range;
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DEFAULT;
		dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_FS_A0;
	} else {
		dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_NOT_SUPPORTED;
		dev->caps.dmfs_high_rate_qpn_base =
			dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
		dev->caps.dmfs_high_rate_qpn_range = MLX4_A0_STEERING_TABLE_SIZE;
	}

	dev->caps.rl_caps = dev_cap->rl_caps;

	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_RSS_RAW_ETH] =
		dev->caps.dmfs_high_rate_qpn_range;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
				 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;

	if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
		if (dev_cap->flags &
		    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
			mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_CQE;
			dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_64B_EQE;
		}

		if (dev_cap->flags2 &
		    (MLX4_DEV_CAP_FLAG2_CQE_STRIDE |
		     MLX4_DEV_CAP_FLAG2_EQE_STRIDE)) {
			mlx4_warn(dev, "Disabling EQE/CQE stride per user request\n");
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE;
			dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE;
		}
	}

	if ((dev->caps.flags &
	    (MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) &&
	    mlx4_is_master(dev))
		dev->caps.function_caps |= MLX4_FUNC_CAP_64B_EQE_CQE;

	if (!mlx4_is_slave(dev)) {
		mlx4_enable_cqe_eqe_stride(dev);
		dev->caps.alloc_res_qp_mask =
			(dev->caps.bf_reg_size ?
			 MLX4_RESERVE_ETH_BF_QP : 0) |
			MLX4_RESERVE_A0_QP;

		if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) &&
		    dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
			mlx4_warn(dev, "Old device ETS support detected\n");
			mlx4_warn(dev, "Consider upgrading device FW.\n");
			dev->caps.flags2 |= MLX4_DEV_CAP_FLAG2_ETS_CFG;
		}

	} else {
		dev->caps.alloc_res_qp_mask = 0;
	}

	mlx4_enable_ignore_fcs(dev);

	return 0;
}

static int mlx4_get_pcie_dev_link_caps(struct mlx4_dev *dev,
				       enum pci_bus_speed *speed,
				       enum pcie_link_width *width)
{
	u32 lnkcap1, lnkcap2;
	int err1, err2;

#define  PCIE_MLW_CAP_SHIFT 4	/* start of MLW mask in link capabilities */

	*speed = PCI_SPEED_UNKNOWN;
	*width = PCIE_LNK_WIDTH_UNKNOWN;

	err1 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP,
					  &lnkcap1);
	err2 = pcie_capability_read_dword(dev->persist->pdev, PCI_EXP_LNKCAP2,
					  &lnkcap2);
	if (!err2 && lnkcap2) { /* PCIe r3.0-compliant */
		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
			*speed = PCIE_SPEED_8_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
			*speed = PCIE_SPEED_5_0GT;
		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
			*speed = PCIE_SPEED_2_5GT;
	}
	if (!err1) {
		*width = (lnkcap1 & PCI_EXP_LNKCAP_MLW) >> PCIE_MLW_CAP_SHIFT;
		if (!lnkcap2) { /* pre-r3.0 */
			if (lnkcap1 & PCI_EXP_LNKCAP_SLS_5_0GB)
				*speed = PCIE_SPEED_5_0GT;
			else if (lnkcap1 & PCI_EXP_LNKCAP_SLS_2_5GB)
				*speed = PCIE_SPEED_2_5GT;
		}
	}

	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) {
		return err1 ? err1 :
			err2 ? err2 : -EINVAL;
	}
	return 0;
}

static void mlx4_check_pcie_caps(struct mlx4_dev *dev)
{
	enum pcie_link_width width, width_cap;
	enum pci_bus_speed speed, speed_cap;
	int err;

#define PCIE_SPEED_STR(speed) \
	(speed == PCIE_SPEED_8_0GT ? "8.0GT/s" : \
	 speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : \
	 speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : \
	 "Unknown")

	err = mlx4_get_pcie_dev_link_caps(dev, &speed_cap, &width_cap);
	if (err) {
		mlx4_warn(dev,
			  "Unable to determine PCIe device BW capabilities\n");
		return;
	}

	err = pcie_get_minimum_link(dev->persist->pdev, &speed, &width);
	if (err || speed == PCI_SPEED_UNKNOWN ||
	    width == PCIE_LNK_WIDTH_UNKNOWN) {
		mlx4_warn(dev,
			  "Unable to determine PCI device chain minimum BW\n");
		return;
	}

	if (width != width_cap || speed != speed_cap)
		mlx4_warn(dev,
			  "PCIe BW is different than device's capability\n");

	mlx4_info(dev, "PCIe link speed is %s, device supports %s\n",
		  PCIE_SPEED_STR(speed), PCIE_SPEED_STR(speed_cap));
	mlx4_info(dev, "PCIe link width is x%d, device supports x%d\n",
		  width, width_cap);
	return;
}

/* The function checks if there are live VFs and returns how many there are */
static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_state;
	int i;
	int ret = 0;

	for (i = 1 /* the PPF is 0 */; i < dev->num_slaves; ++i) {
		s_state = &priv->mfunc.master.slave_state[i];
		if (s_state->active && s_state->last_cmd !=
		    MLX4_COMM_CMD_RESET) {
			mlx4_warn(dev, "%s: slave: %d is still active\n",
				  __func__, i);
			ret++;
		}
	}
	return ret;
}

int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey)
{
	u32 qk = MLX4_RESERVED_QKEY_BASE;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX ||
	    qpn < dev->phys_caps.base_proxy_sqpn)
		return -EINVAL;

	if (qpn >= dev->phys_caps.base_tunnel_sqpn)
		/* tunnel qp */
		qk += qpn - dev->phys_caps.base_tunnel_sqpn;
	else
		qk += qpn - dev->phys_caps.base_proxy_sqpn;
	*qkey = qk;
	return 0;
}
EXPORT_SYMBOL(mlx4_get_parav_qkey);

void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, int i, int val)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->virt2phys_pkey[slave][port - 1][i] = val;
}
EXPORT_SYMBOL(mlx4_sync_pkey_table);

void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return;

	priv->slave_node_guids[slave] = guid;
}
EXPORT_SYMBOL(mlx4_put_slave_node_guid);

__be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = container_of(dev, struct mlx4_priv, dev);

	if (!mlx4_is_master(dev))
		return 0;

	return priv->slave_node_guids[slave];
}
EXPORT_SYMBOL(mlx4_get_slave_node_guid);

int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *s_slave;

	if (!mlx4_is_master(dev))
		return 0;

	s_slave = &priv->mfunc.master.slave_state[slave];
	return !!s_slave->active;
}
EXPORT_SYMBOL(mlx4_is_slave_active);

static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
{
	dev->caps.steering_mode = hca_param->steering_mode;
	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
		dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry;
		dev->caps.fs_log_max_ucast_qp_range_size =
			dev_cap->fs_log_max_ucast_qp_range_size;
	} else
		dev->caps.num_qp_per_mgm =
			4 * ((1 << hca_param->log_mc_entry_sz)/16 - 2);

	mlx4_dbg(dev, "Steering mode is: %s\n",
		 mlx4_steering_mode_str(dev->caps.steering_mode));
}

static int mlx4_slave_cap(struct mlx4_dev *dev)
{
	int err;
	u32 page_size;
	struct mlx4_dev_cap dev_cap;
	struct mlx4_func_cap func_cap;
	struct mlx4_init_hca_param hca_param;
	u8 i;

	memset(&hca_param, 0, sizeof(hca_param));
	err = mlx4_QUERY_HCA(dev, &hca_param);
	if (err) {
		mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
		return err;
	}

	/* fail if the hca has an unknown global capability
	 * at this time global_caps should always be zeroed
	 */
	if (hca_param.global_caps) {
		mlx4_err(dev, "Unknown hca global capabilities\n");
		return -ENOSYS;
	}

	mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;

	dev->caps.hca_core_clock = hca_param.hca_core_clock;

	memset(&dev_cap, 0, sizeof(dev_cap));
	dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
		return err;
	}

	err = mlx4_QUERY_FW(dev);
	if (err)
		mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");

	page_size = ~dev->caps.page_size_cap + 1;
	mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
	if (page_size > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
			 page_size, PAGE_SIZE);
		return -ENODEV;
	}

	/* Set uar_page_shift for VF */
	dev->uar_page_shift = hca_param.uar_page_sz + 12;

	/* Make sure the master uar page size is valid */
	if (dev->uar_page_shift > PAGE_SHIFT) {
		mlx4_err(dev,
			 "Invalid configuration: uar page size is larger than system page size\n");
		return -ENODEV;
	}

	/* Set reserved_uars based on the uar_page_shift */
	mlx4_set_num_reserved_uars(dev, &dev_cap);

	/* Although uar page size in FW differs from system page size,
	 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
	 * still works with assumption that uar page size == system page size
	 */
	dev->caps.uar_page_size = PAGE_SIZE;

	memset(&func_cap, 0, sizeof(func_cap));
	err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
	if (err) {
		mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
			 err);
		return err;
	}

	if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
	    PF_CONTEXT_BEHAVIOUR_MASK) {
		mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
			 func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
		return -ENOSYS;
	}

	dev->caps.num_ports = func_cap.num_ports;
	dev->quotas.qp = func_cap.qp_quota;
	dev->quotas.srq = func_cap.srq_quota;
	dev->quotas.cq = func_cap.cq_quota;
	dev->quotas.mpt = func_cap.mpt_quota;
	dev->quotas.mtt = func_cap.mtt_quota;
	dev->caps.num_qps = 1 << hca_param.log_num_qps;
	dev->caps.num_srqs = 1 << hca_param.log_num_srqs;
	dev->caps.num_cqs = 1 << hca_param.log_num_cqs;
	dev->caps.num_mpts = 1 << hca_param.log_mpt_sz;
	dev->caps.num_eqs = func_cap.max_eq;
	dev->caps.reserved_eqs = func_cap.reserved_eq;
	dev->caps.reserved_lkey = func_cap.reserved_lkey;
	dev->caps.num_pds = MLX4_NUM_PDS;
	dev->caps.num_mgms = 0;
	dev->caps.num_amgms = 0;

	if (dev->caps.num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
			 dev->caps.num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	mlx4_replace_zero_macs(dev);

	dev->caps.qp0_qkey = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp0_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_tunnel = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);
	dev->caps.qp1_proxy = kcalloc(dev->caps.num_ports, sizeof(u32), GFP_KERNEL);

	if (!dev->caps.qp0_tunnel || !dev->caps.qp0_proxy ||
	    !dev->caps.qp1_tunnel || !dev->caps.qp1_proxy ||
	    !dev->caps.qp0_qkey) {
		err = -ENOMEM;
		goto err_mem;
	}

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		err = mlx4_QUERY_FUNC_CAP(dev, i, &func_cap);
		if (err) {
			mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
				 i, err);
			goto err_mem;
		}
		dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
		dev->caps.qp0_tunnel[i - 1] = func_cap.qp0_tunnel_qpn;
		dev->caps.qp0_proxy[i - 1] = func_cap.qp0_proxy_qpn;
		dev->caps.qp1_tunnel[i - 1] = func_cap.qp1_tunnel_qpn;
		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
		dev->caps.port_mask[i] = dev->caps.port_type[i];
		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
						      &dev->caps.gid_table_len[i],
						      &dev->caps.pkey_table_len[i]);
		if (err)
			goto err_mem;
	}

	if (dev->caps.uar_page_size * (dev->caps.num_uars -
				       dev->caps.reserved_uars) >
				       pci_resource_len(dev->persist->pdev,
							2)) {
		mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
			 dev->caps.uar_page_size * dev->caps.num_uars,
			 (unsigned long long)
			 pci_resource_len(dev->persist->pdev, 2));
		err = -ENOMEM;
		goto err_mem;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_EQE_ENABLED) {
		dev->caps.eqe_size = 64;
		dev->caps.eqe_factor = 1;
	} else {
		dev->caps.eqe_size = 32;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_64B_CQE_ENABLED) {
		dev->caps.cqe_size = 64;
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	} else {
		dev->caps.cqe_size = 32;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_EQE_STRIDE_ENABLED) {
		dev->caps.eqe_size = hca_param.eqe_size;
		dev->caps.eqe_factor = 0;
	}

	if (hca_param.dev_cap_enabled & MLX4_DEV_CAP_CQE_STRIDE_ENABLED) {
		dev->caps.cqe_size = hca_param.cqe_size;
		/* User still needs to know when CQE > 32B */
		dev->caps.userspace_caps |= MLX4_USER_DEV_CAP_LARGE_CQE;
	}

	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
	mlx4_warn(dev, "Timestamping is not supported in slave mode\n");

	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
	mlx4_dbg(dev, "RSS support for IP fragments is %s\n",
		 hca_param.rss_ip_frags ? "on" : "off");

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_BF_RES_QP &&
	    dev->caps.bf_reg_size)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_ETH_BF_QP;

	if (func_cap.extra_flags & MLX4_QUERY_FUNC_FLAGS_A0_RES_QP)
		dev->caps.alloc_res_qp_mask |= MLX4_RESERVE_A0_QP;

	return 0;

err_mem:
	kfree(dev->caps.qp0_qkey);
	kfree(dev->caps.qp0_tunnel);
	kfree(dev->caps.qp0_proxy);
	kfree(dev->caps.qp1_tunnel);
	kfree(dev->caps.qp1_proxy);
	dev->caps.qp0_qkey = NULL;
	dev->caps.qp0_tunnel = NULL;
	dev->caps.qp0_proxy = NULL;
	dev->caps.qp1_tunnel = NULL;
	dev->caps.qp1_proxy = NULL;

	return err;
}

static void mlx4_request_modules(struct mlx4_dev *dev)
{
	int port;
	int has_ib_port = false;
	int has_eth_port = false;
#define EN_DRV_NAME	"mlx4_en"
#define IB_DRV_NAME	"mlx4_ib"

	for (port = 1; port <= dev->caps.num_ports; port++) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			has_ib_port = true;
		else if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
			has_eth_port = true;
	}

	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
		request_module_nowait(IB_DRV_NAME);
}

/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int err = 0;
	int change = 0;
	int port;

	for (port = 0; port < dev->caps.num_ports; port++) {
		/* Change the port type only if the new type is different
		 * from the current, and not set to Auto */
		if (port_types[port] != dev->caps.port_type[port + 1])
			change = 1;
	}
	if (change) {
		mlx4_unregister_device(dev);
		for (port = 1; port <= dev->caps.num_ports; port++) {
			mlx4_CLOSE_PORT(dev, port);
			dev->caps.port_type[port] = port_types[port - 1];
			err = mlx4_SET_PORT(dev, port, -1);
			if (err) {
				mlx4_err(dev, "Failed to set port %d, aborting\n",
					 port);
				goto out;
			}
		}
		mlx4_set_port_mask(dev);
		err = mlx4_register_device(dev);
		if (err) {
			mlx4_err(dev, "Failed to register device\n");
			goto out;
		}
		mlx4_request_modules(dev);
	}

out:
	return err;
}

static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	char type[8];

	sprintf(type, "%s",
		(mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth");
	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}

static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	static DEFINE_MUTEX(set_port_type_mutex);
	int i;
	int err = 0;

	mutex_lock(&set_port_type_mutex);

	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not a supported port type\n", buf);
		err = -EINVAL;
		goto err_out;
	}

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
	    !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
err_out:
	mutex_unlock(&set_port_type_mutex);

	return err ? err : count;
}

enum ibta_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int int_to_ibta_mtu(int mtu)
{
	switch (mtu) {
	case 256:  return IB_MTU_256;
	case 512:  return IB_MTU_512;
	case 1024: return IB_MTU_1024;
	case 2048: return IB_MTU_2048;
	case 4096: return IB_MTU_4096;
	default: return -1;
	}
}

static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256:  return 256;
	case IB_MTU_512:  return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

static ssize_t show_port_ib_mtu(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");

	sprintf(buf, "%d\n",
		ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
	return strlen(buf);
}

static ssize_t set_port_ib_mtu(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_mtu_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	int err, port, mtu, ibta_mtu = -1;

	if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
		mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
		return -EINVAL;
	}

	err = kstrtoint(buf, 0, &mtu);
	if (!err)
		ibta_mtu = int_to_ibta_mtu(mtu);

	if (err || ibta_mtu < 0) {
		mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
		return -EINVAL;
	}

	mdev->caps.port_ib_mtu[info->port] = ibta_mtu;

	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	mlx4_unregister_device(mdev);
	for (port = 1; port <= mdev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(mdev, port);
		err = mlx4_SET_PORT(mdev, port, -1);
		if (err) {
			mlx4_err(mdev, "Failed to set port %d, aborting\n",
				 port);
			goto err_set_port;
		}
	}
	err = mlx4_register_device(mdev);
err_set_port:
	mutex_unlock(&priv->port_mutex);
	mlx4_start_sense(mdev);
	return err ? err : count;
}

/* bond for multi-function device */
#define MAX_MF_BOND_ALLOWED_SLAVES 63
static int mlx4_mf_bond(struct mlx4_dev *dev)
{
	int err = 0;
	int nvfs;
	struct mlx4_slaves_pport slaves_port1;
	struct mlx4_slaves_pport slaves_port2;
	DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);

	slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
	slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
	bitmap_and(slaves_port_1_2,
		   slaves_port1.slaves, slaves_port2.slaves,
		   dev->persist->num_vfs + 1);

	/* only single port vfs are allowed */
	if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
		mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
		return -EINVAL;
	}

	/* number of virtual functions is number of total functions minus one
	 * physical function for each port.
	 */
	nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
		bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;

	/* limit on maximum allowed VFs */
	if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
		mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
			  nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
		return -EINVAL;
	}

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
		mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
		return -EINVAL;
	}

	err = mlx4_bond_mac_table(dev);
	if (err)
		return err;
	err = mlx4_bond_vlan_table(dev);
	if (err)
		goto err1;
	err = mlx4_bond_fs_rules(dev);
	if (err)
		goto err2;

	return 0;
err2:
	(void)mlx4_unbond_vlan_table(dev);
err1:
	(void)mlx4_unbond_mac_table(dev);
	return err;
}

static int mlx4_mf_unbond(struct mlx4_dev *dev)
{
	int ret, ret1;

	ret = mlx4_unbond_fs_rules(dev);
	if (ret)
		mlx4_warn(dev, "multifunction unbond for flow rules failed (%d)\n", ret);
	ret1 = mlx4_unbond_mac_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for MAC table failed (%d)\n", ret1);
		ret = ret1;
	}
	ret1 = mlx4_unbond_vlan_table(dev);
	if (ret1) {
		mlx4_warn(dev, "multifunction unbond for VLAN table failed (%d)\n", ret1);
		ret = ret1;
	}
	return ret;
}

int mlx4_bond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (!mlx4_is_bonded(dev)) {
		ret = mlx4_do_bond(dev, true);
		if (ret)
			mlx4_err(dev, "Failed to bond device: %d\n", ret);
		if (!ret && mlx4_is_master(dev)) {
			ret = mlx4_mf_bond(dev);
			if (ret) {
				mlx4_err(dev, "bond for multifunction failed\n");
				mlx4_do_bond(dev, false);
			}
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is bonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_bond);

int mlx4_unbond(struct mlx4_dev *dev)
{
	int ret = 0;
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->bond_mutex);

	if (mlx4_is_bonded(dev)) {
		int ret2 = 0;

		ret = mlx4_do_bond(dev, false);
		if (ret)
			mlx4_err(dev, "Failed to unbond device: %d\n", ret);
		if (mlx4_is_master(dev))
			ret2 = mlx4_mf_unbond(dev);
		if (ret2) {
			mlx4_warn(dev, "Failed to unbond device for multifunction (%d)\n", ret2);
			ret = ret2;
		}
	}

	mutex_unlock(&priv->bond_mutex);
	if (!ret)
		mlx4_dbg(dev, "Device is unbonded\n");

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_unbond);


int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p)
{
	u8 port1 = v2p->port1;
	u8 port2 = v2p->port2;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	mutex_lock(&priv->bond_mutex);

	/* zero means keep current mapping for this port */
	if (port1 == 0)
		port1 = priv->v2p.port1;
	if (port2 == 0)
		port2 = priv->v2p.port2;

	if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) ||
	    (port2 < 1) || (port2 > MLX4_MAX_PORTS) ||
	    (port1 == 2 && port2 == 1)) {
		/* besides boundary checks cross mapping makes
		 * no sense and is therefore not allowed */
		err = -EINVAL;
	} else if ((port1 == priv->v2p.port1) &&
		   (port2 == priv->v2p.port2)) {
		err = 0;
	} else {
		err = mlx4_virt2phy_port_map(dev, port1, port2);
		if (!err) {
			mlx4_dbg(dev, "port map changed: [%d][%d]\n",
				 port1, port2);
			priv->v2p.port1 = port1;
			priv->v2p.port2 = port2;
		} else {
			mlx4_err(dev, "Failed to change port map: %d\n", err);
		}
	}

	mutex_unlock(&priv->bond_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_port_map_set);

static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting\n");
		goto err_free;
	}

	err = mlx4_RUN_FW(dev);
	if (err) {
		mlx4_err(dev, "RUN_FW command failed, aborting\n");
		goto err_unmap_fa;
	}

	return 0;

err_unmap_fa:
	mlx4_UNMAP_FA(dev);

err_free:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}

static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int num_eqs;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}

static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int num_eqs;
	int err;

	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
		goto err_unmap_aux;
	}

	num_eqs = dev->phys_caps.num_phys_eqs;
	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  num_eqs, num_eqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtts,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
		goto err_unmap_auxc;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
		goto err_unmap_cq;
	}

	/*
	 * For flow steering device managed mode it is required to use
	 * mlx4_init_icm_table. For B0 steering mode it's not strictly
	 * required, but for simplicity just map the whole multicast
	 * group table now.  The table isn't very big and it's a lot
	 * easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base,
				  mlx4_get_mgm_entry_size(dev),
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}

static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}

static void mlx4_slave_exit(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_lock(&priv->cmd.slave_cmd_mutex);
	if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP,
			  MLX4_COMM_TIME))
		mlx4_warn(dev, "Failed to close slave function\n");
to close slave function\n"); 1762 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1763 } 1764 1765 static int map_bf_area(struct mlx4_dev *dev) 1766 { 1767 struct mlx4_priv *priv = mlx4_priv(dev); 1768 resource_size_t bf_start; 1769 resource_size_t bf_len; 1770 int err = 0; 1771 1772 if (!dev->caps.bf_reg_size) 1773 return -ENXIO; 1774 1775 bf_start = pci_resource_start(dev->persist->pdev, 2) + 1776 (dev->caps.num_uars << PAGE_SHIFT); 1777 bf_len = pci_resource_len(dev->persist->pdev, 2) - 1778 (dev->caps.num_uars << PAGE_SHIFT); 1779 priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len); 1780 if (!priv->bf_mapping) 1781 err = -ENOMEM; 1782 1783 return err; 1784 } 1785 1786 static void unmap_bf_area(struct mlx4_dev *dev) 1787 { 1788 if (mlx4_priv(dev)->bf_mapping) 1789 io_mapping_free(mlx4_priv(dev)->bf_mapping); 1790 } 1791 1792 cycle_t mlx4_read_clock(struct mlx4_dev *dev) 1793 { 1794 u32 clockhi, clocklo, clockhi1; 1795 cycle_t cycles; 1796 int i; 1797 struct mlx4_priv *priv = mlx4_priv(dev); 1798 1799 for (i = 0; i < 10; i++) { 1800 clockhi = swab32(readl(priv->clock_mapping)); 1801 clocklo = swab32(readl(priv->clock_mapping + 4)); 1802 clockhi1 = swab32(readl(priv->clock_mapping)); 1803 if (clockhi == clockhi1) 1804 break; 1805 } 1806 1807 cycles = (u64) clockhi << 32 | (u64) clocklo; 1808 1809 return cycles; 1810 } 1811 EXPORT_SYMBOL_GPL(mlx4_read_clock); 1812 1813 1814 static int map_internal_clock(struct mlx4_dev *dev) 1815 { 1816 struct mlx4_priv *priv = mlx4_priv(dev); 1817 1818 priv->clock_mapping = 1819 ioremap(pci_resource_start(dev->persist->pdev, 1820 priv->fw.clock_bar) + 1821 priv->fw.clock_offset, MLX4_CLOCK_SIZE); 1822 1823 if (!priv->clock_mapping) 1824 return -ENOMEM; 1825 1826 return 0; 1827 } 1828 1829 int mlx4_get_internal_clock_params(struct mlx4_dev *dev, 1830 struct mlx4_clock_params *params) 1831 { 1832 struct mlx4_priv *priv = mlx4_priv(dev); 1833 1834 if (mlx4_is_slave(dev)) 1835 return -ENOTSUPP; 1836 1837 if (!params) 1838 return -EINVAL; 1839 1840 params->bar = priv->fw.clock_bar; 1841 params->offset = priv->fw.clock_offset; 1842 params->size = MLX4_CLOCK_SIZE; 1843 1844 return 0; 1845 } 1846 EXPORT_SYMBOL_GPL(mlx4_get_internal_clock_params); 1847 1848 static void unmap_internal_clock(struct mlx4_dev *dev) 1849 { 1850 struct mlx4_priv *priv = mlx4_priv(dev); 1851 1852 if (priv->clock_mapping) 1853 iounmap(priv->clock_mapping); 1854 } 1855 1856 static void mlx4_close_hca(struct mlx4_dev *dev) 1857 { 1858 unmap_internal_clock(dev); 1859 unmap_bf_area(dev); 1860 if (mlx4_is_slave(dev)) 1861 mlx4_slave_exit(dev); 1862 else { 1863 mlx4_CLOSE_HCA(dev, 0); 1864 mlx4_free_icms(dev); 1865 } 1866 } 1867 1868 static void mlx4_close_fw(struct mlx4_dev *dev) 1869 { 1870 if (!mlx4_is_slave(dev)) { 1871 mlx4_UNMAP_FA(dev); 1872 mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0); 1873 } 1874 } 1875 1876 static int mlx4_comm_check_offline(struct mlx4_dev *dev) 1877 { 1878 #define COMM_CHAN_OFFLINE_OFFSET 0x09 1879 1880 u32 comm_flags; 1881 u32 offline_bit; 1882 unsigned long end; 1883 struct mlx4_priv *priv = mlx4_priv(dev); 1884 1885 end = msecs_to_jiffies(MLX4_COMM_OFFLINE_TIME_OUT) + jiffies; 1886 while (time_before(jiffies, end)) { 1887 comm_flags = swab32(readl((__iomem char *)priv->mfunc.comm + 1888 MLX4_COMM_CHAN_FLAGS)); 1889 offline_bit = (comm_flags & 1890 (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); 1891 if (!offline_bit) 1892 return 0; 1893 /* There are cases as part of AER/Reset flow that PF needs 1894 * around 100 msec to load. 
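(The overall wait is bounded by MLX4_COMM_OFFLINE_TIME_OUT; only when that expires is the channel declared offline below.)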
We therefore sleep for 100 msec 1895 * to allow other tasks to make use of that CPU during this 1896 * time interval. 1897 */ 1898 msleep(100); 1899 } 1900 mlx4_err(dev, "Communication channel is offline.\n"); 1901 return -EIO; 1902 } 1903 1904 static void mlx4_reset_vf_support(struct mlx4_dev *dev) 1905 { 1906 #define COMM_CHAN_RST_OFFSET 0x1e 1907 1908 struct mlx4_priv *priv = mlx4_priv(dev); 1909 u32 comm_rst; 1910 u32 comm_caps; 1911 1912 comm_caps = swab32(readl((__iomem char *)priv->mfunc.comm + 1913 MLX4_COMM_CHAN_CAPS)); 1914 comm_rst = (comm_caps & (u32)(1 << COMM_CHAN_RST_OFFSET)); 1915 1916 if (comm_rst) 1917 dev->caps.vf_caps |= MLX4_VF_CAP_FLAG_RESET; 1918 } 1919 1920 static int mlx4_init_slave(struct mlx4_dev *dev) 1921 { 1922 struct mlx4_priv *priv = mlx4_priv(dev); 1923 u64 dma = (u64) priv->mfunc.vhcr_dma; 1924 int ret_from_reset = 0; 1925 u32 slave_read; 1926 u32 cmd_channel_ver; 1927 1928 if (atomic_read(&pf_loading)) { 1929 mlx4_warn(dev, "PF is not ready - Deferring probe\n"); 1930 return -EPROBE_DEFER; 1931 } 1932 1933 mutex_lock(&priv->cmd.slave_cmd_mutex); 1934 priv->cmd.max_cmds = 1; 1935 if (mlx4_comm_check_offline(dev)) { 1936 mlx4_err(dev, "PF is not responsive, skipping initialization\n"); 1937 goto err_offline; 1938 } 1939 1940 mlx4_reset_vf_support(dev); 1941 mlx4_warn(dev, "Sending reset\n"); 1942 ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 1943 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME); 1944 /* if we are in the middle of flr the slave will try 1945 * NUM_OF_RESET_RETRIES times before leaving.*/ 1946 if (ret_from_reset) { 1947 if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) { 1948 mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n"); 1949 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1950 return -EPROBE_DEFER; 1951 } else 1952 goto err; 1953 } 1954 1955 /* check the driver version - the slave I/F revision 1956 * must match the master's */ 1957 slave_read = swab32(readl(&priv->mfunc.comm->slave_read)); 1958 cmd_channel_ver = mlx4_comm_get_version(); 1959 1960 if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) != 1961 MLX4_COMM_GET_IF_REV(slave_read)) { 1962 mlx4_err(dev, "slave driver version is not supported by the master\n"); 1963 goto err; 1964 } 1965 1966 mlx4_warn(dev, "Sending vhcr0\n"); 1967 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48, 1968 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1969 goto err; 1970 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32, 1971 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1972 goto err; 1973 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16, 1974 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1975 goto err; 1976 if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, 1977 MLX4_COMM_CMD_NA_OP, MLX4_COMM_TIME)) 1978 goto err; 1979 1980 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1981 return 0; 1982 1983 err: 1984 mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_CMD_NA_OP, 0); 1985 err_offline: 1986 mutex_unlock(&priv->cmd.slave_cmd_mutex); 1987 return -EIO; 1988 } 1989 1990 static void mlx4_parav_master_pf_caps(struct mlx4_dev *dev) 1991 { 1992 int i; 1993 1994 for (i = 1; i <= dev->caps.num_ports; i++) { 1995 if (dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) 1996 dev->caps.gid_table_len[i] = 1997 mlx4_get_slave_num_gids(dev, 0, i); 1998 else 1999 dev->caps.gid_table_len[i] = 1; 2000 dev->caps.pkey_table_len[i] = 2001 dev->phys_caps.pkey_phys_table_len[i] - 1; 2002 } 2003 } 2004 2005 static int choose_log_fs_mgm_entry_size(int qp_per_entry) 2006 { 2007 int i = MLX4_MIN_MGM_LOG_ENTRY_SIZE; 2008 2009 for (i = 
MLX4_MIN_MGM_LOG_ENTRY_SIZE; i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE; 2010 i++) { 2011 if (qp_per_entry <= 4 * ((1 << i) / 16 - 2)) 2012 break; 2013 } 2014 2015 return (i <= MLX4_MAX_MGM_LOG_ENTRY_SIZE) ? i : -1; 2016 } 2017 2018 static const char *dmfs_high_rate_steering_mode_str(int dmfs_high_steer_mode) 2019 { 2020 switch (dmfs_high_steer_mode) { 2021 case MLX4_STEERING_DMFS_A0_DEFAULT: 2022 return "default performance"; 2023 2024 case MLX4_STEERING_DMFS_A0_DYNAMIC: 2025 return "dynamic hybrid mode"; 2026 2027 case MLX4_STEERING_DMFS_A0_STATIC: 2028 return "performance optimized for limited rule configuration (static)"; 2029 2030 case MLX4_STEERING_DMFS_A0_DISABLE: 2031 return "disabled performance optimized steering"; 2032 2033 case MLX4_STEERING_DMFS_A0_NOT_SUPPORTED: 2034 return "performance optimized steering not supported"; 2035 2036 default: 2037 return "Unrecognized mode"; 2038 } 2039 } 2040 2041 #define MLX4_DMFS_A0_STEERING (1UL << 2) 2042 2043 static void choose_steering_mode(struct mlx4_dev *dev, 2044 struct mlx4_dev_cap *dev_cap) 2045 { 2046 if (mlx4_log_num_mgm_entry_size <= 0) { 2047 if ((-mlx4_log_num_mgm_entry_size) & MLX4_DMFS_A0_STEERING) { 2048 if (dev->caps.dmfs_high_steer_mode == 2049 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2050 mlx4_err(dev, "DMFS high rate mode not supported\n"); 2051 else 2052 dev->caps.dmfs_high_steer_mode = 2053 MLX4_STEERING_DMFS_A0_STATIC; 2054 } 2055 } 2056 2057 if (mlx4_log_num_mgm_entry_size <= 0 && 2058 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN && 2059 (!mlx4_is_mfunc(dev) || 2060 (dev_cap->fs_max_num_qp_per_entry >= 2061 (dev->persist->num_vfs + 1))) && 2062 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry) >= 2063 MLX4_MIN_MGM_LOG_ENTRY_SIZE) { 2064 dev->oper_log_mgm_entry_size = 2065 choose_log_fs_mgm_entry_size(dev_cap->fs_max_num_qp_per_entry); 2066 dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; 2067 dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; 2068 dev->caps.fs_log_max_ucast_qp_range_size = 2069 dev_cap->fs_log_max_ucast_qp_range_size; 2070 } else { 2071 if (dev->caps.dmfs_high_steer_mode != 2072 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2073 dev->caps.dmfs_high_steer_mode = MLX4_STEERING_DMFS_A0_DISABLE; 2074 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && 2075 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2076 dev->caps.steering_mode = MLX4_STEERING_MODE_B0; 2077 else { 2078 dev->caps.steering_mode = MLX4_STEERING_MODE_A0; 2079 2080 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || 2081 dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) 2082 mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n"); 2083 } 2084 dev->oper_log_mgm_entry_size = 2085 mlx4_log_num_mgm_entry_size > 0 ? 
2086 mlx4_log_num_mgm_entry_size : 2087 MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE; 2088 dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); 2089 } 2090 mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n", 2091 mlx4_steering_mode_str(dev->caps.steering_mode), 2092 dev->oper_log_mgm_entry_size, 2093 mlx4_log_num_mgm_entry_size); 2094 } 2095 2096 static void choose_tunnel_offload_mode(struct mlx4_dev *dev, 2097 struct mlx4_dev_cap *dev_cap) 2098 { 2099 if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && 2100 dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 2101 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; 2102 else 2103 dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; 2104 2105 mlx4_dbg(dev, "Tunneling offload mode is: %s\n", (dev->caps.tunnel_offload_mode 2106 == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none"); 2107 } 2108 2109 static int mlx4_validate_optimized_steering(struct mlx4_dev *dev) 2110 { 2111 int i; 2112 struct mlx4_port_cap port_cap; 2113 2114 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) 2115 return -EINVAL; 2116 2117 for (i = 1; i <= dev->caps.num_ports; i++) { 2118 if (mlx4_dev_port(dev, i, &port_cap)) { 2119 mlx4_err(dev, 2120 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n"); 2121 } else if ((dev->caps.dmfs_high_steer_mode != 2122 MLX4_STEERING_DMFS_A0_DEFAULT) && 2123 (port_cap.dmfs_optimized_state == 2124 !!(dev->caps.dmfs_high_steer_mode == 2125 MLX4_STEERING_DMFS_A0_DISABLE))) { 2126 mlx4_err(dev, 2127 "DMFS high rate steer mode differs, driver requested %s but %s in FW.\n", 2128 dmfs_high_rate_steering_mode_str( 2129 dev->caps.dmfs_high_steer_mode), 2130 (port_cap.dmfs_optimized_state ?
2131 "enabled" : "disabled")); 2132 } 2133 } 2134 2135 return 0; 2136 } 2137 2138 static int mlx4_init_fw(struct mlx4_dev *dev) 2139 { 2140 struct mlx4_mod_stat_cfg mlx4_cfg; 2141 int err = 0; 2142 2143 if (!mlx4_is_slave(dev)) { 2144 err = mlx4_QUERY_FW(dev); 2145 if (err) { 2146 if (err == -EACCES) 2147 mlx4_info(dev, "non-primary physical function, skipping\n"); 2148 else 2149 mlx4_err(dev, "QUERY_FW command failed, aborting\n"); 2150 return err; 2151 } 2152 2153 err = mlx4_load_fw(dev); 2154 if (err) { 2155 mlx4_err(dev, "Failed to start FW, aborting\n"); 2156 return err; 2157 } 2158 2159 mlx4_cfg.log_pg_sz_m = 1; 2160 mlx4_cfg.log_pg_sz = 0; 2161 err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg); 2162 if (err) 2163 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n"); 2164 } 2165 2166 return err; 2167 } 2168 2169 static int mlx4_init_hca(struct mlx4_dev *dev) 2170 { 2171 struct mlx4_priv *priv = mlx4_priv(dev); 2172 struct mlx4_adapter adapter; 2173 struct mlx4_dev_cap dev_cap; 2174 struct mlx4_profile profile; 2175 struct mlx4_init_hca_param init_hca; 2176 u64 icm_size; 2177 struct mlx4_config_dev_params params; 2178 int err; 2179 2180 if (!mlx4_is_slave(dev)) { 2181 err = mlx4_dev_cap(dev, &dev_cap); 2182 if (err) { 2183 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); 2184 return err; 2185 } 2186 2187 choose_steering_mode(dev, &dev_cap); 2188 choose_tunnel_offload_mode(dev, &dev_cap); 2189 2190 if (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC && 2191 mlx4_is_master(dev)) 2192 dev->caps.function_caps |= MLX4_FUNC_CAP_DMFS_A0_STATIC; 2193 2194 err = mlx4_get_phys_port_id(dev); 2195 if (err) 2196 mlx4_err(dev, "Fail to get physical port id\n"); 2197 2198 if (mlx4_is_master(dev)) 2199 mlx4_parav_master_pf_caps(dev); 2200 2201 if (mlx4_low_memory_profile()) { 2202 mlx4_info(dev, "Running from within kdump kernel. 
Using low memory profile\n"); 2203 profile = low_mem_profile; 2204 } else { 2205 profile = default_profile; 2206 } 2207 if (dev->caps.steering_mode == 2208 MLX4_STEERING_MODE_DEVICE_MANAGED) 2209 profile.num_mcg = MLX4_FS_NUM_MCG; 2210 2211 icm_size = mlx4_make_profile(dev, &profile, &dev_cap, 2212 &init_hca); 2213 if ((long long) icm_size < 0) { 2214 err = icm_size; 2215 return err; 2216 } 2217 2218 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2219 2220 /* Always set UAR page size 4KB, set log_uar_sz accordingly */ 2221 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) + 2222 PAGE_SHIFT - 2223 DEFAULT_UAR_PAGE_SHIFT; 2224 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12; 2225 2226 init_hca.mw_enabled = 0; 2227 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2228 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2229 init_hca.mw_enabled = INIT_HCA_TPT_MW_ENABLE; 2230 2231 err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); 2232 if (err) 2233 return err; 2234 2235 err = mlx4_INIT_HCA(dev, &init_hca); 2236 if (err) { 2237 mlx4_err(dev, "INIT_HCA command failed, aborting\n"); 2238 goto err_free_icm; 2239 } 2240 2241 if (dev_cap.flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS) { 2242 err = mlx4_query_func(dev, &dev_cap); 2243 if (err < 0) { 2244 mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); 2245 goto err_close; 2246 } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { 2247 dev->caps.num_eqs = dev_cap.max_eqs; 2248 dev->caps.reserved_eqs = dev_cap.reserved_eqs; 2249 dev->caps.reserved_uars = dev_cap.reserved_uars; 2250 } 2251 } 2252 2253 /* 2254 * If TS is supported by FW 2255 * read HCA frequency by QUERY_HCA command 2256 */ 2257 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) { 2258 memset(&init_hca, 0, sizeof(init_hca)); 2259 err = mlx4_QUERY_HCA(dev, &init_hca); 2260 if (err) { 2261 mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n"); 2262 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2263 } else { 2264 dev->caps.hca_core_clock = 2265 init_hca.hca_core_clock; 2266 } 2267 2268 /* In case we got HCA frequency 0 - disable timestamping 2269 * to avoid dividing by zero 2270 */ 2271 if (!dev->caps.hca_core_clock) { 2272 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2273 mlx4_err(dev, 2274 "HCA frequency is 0 - timestamping is not supported\n"); 2275 } else if (map_internal_clock(dev)) { 2276 /* 2277 * Map internal clock, 2278 * in case of failure disable timestamping 2279 */ 2280 dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS; 2281 mlx4_err(dev, "Failed to map internal clock. 
Timestamping is not supported\n"); 2282 } 2283 } 2284 2285 if (dev->caps.dmfs_high_steer_mode != 2286 MLX4_STEERING_DMFS_A0_NOT_SUPPORTED) { 2287 if (mlx4_validate_optimized_steering(dev)) 2288 mlx4_warn(dev, "Optimized steering validation failed\n"); 2289 2290 if (dev->caps.dmfs_high_steer_mode == 2291 MLX4_STEERING_DMFS_A0_DISABLE) { 2292 dev->caps.dmfs_high_rate_qpn_base = 2293 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]; 2294 dev->caps.dmfs_high_rate_qpn_range = 2295 MLX4_A0_STEERING_TABLE_SIZE; 2296 } 2297 2298 mlx4_dbg(dev, "DMFS high rate steer mode is: %s\n", 2299 dmfs_high_rate_steering_mode_str( 2300 dev->caps.dmfs_high_steer_mode)); 2301 } 2302 } else { 2303 err = mlx4_init_slave(dev); 2304 if (err) { 2305 if (err != -EPROBE_DEFER) 2306 mlx4_err(dev, "Failed to initialize slave\n"); 2307 return err; 2308 } 2309 2310 err = mlx4_slave_cap(dev); 2311 if (err) { 2312 mlx4_err(dev, "Failed to obtain slave caps\n"); 2313 goto err_close; 2314 } 2315 } 2316 2317 if (map_bf_area(dev)) 2318 mlx4_dbg(dev, "Failed to map blue flame area\n"); 2319 2320 /*Only the master set the ports, all the rest got it from it.*/ 2321 if (!mlx4_is_slave(dev)) 2322 mlx4_set_port_mask(dev); 2323 2324 err = mlx4_QUERY_ADAPTER(dev, &adapter); 2325 if (err) { 2326 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n"); 2327 goto unmap_bf; 2328 } 2329 2330 /* Query CONFIG_DEV parameters */ 2331 err = mlx4_config_dev_retrieval(dev, ¶ms); 2332 if (err && err != -ENOTSUPP) { 2333 mlx4_err(dev, "Failed to query CONFIG_DEV parameters\n"); 2334 } else if (!err) { 2335 dev->caps.rx_checksum_flags_port[1] = params.rx_csum_flags_port_1; 2336 dev->caps.rx_checksum_flags_port[2] = params.rx_csum_flags_port_2; 2337 } 2338 priv->eq_table.inta_pin = adapter.inta_pin; 2339 memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id); 2340 2341 return 0; 2342 2343 unmap_bf: 2344 unmap_internal_clock(dev); 2345 unmap_bf_area(dev); 2346 2347 if (mlx4_is_slave(dev)) { 2348 kfree(dev->caps.qp0_qkey); 2349 kfree(dev->caps.qp0_tunnel); 2350 kfree(dev->caps.qp0_proxy); 2351 kfree(dev->caps.qp1_tunnel); 2352 kfree(dev->caps.qp1_proxy); 2353 } 2354 2355 err_close: 2356 if (mlx4_is_slave(dev)) 2357 mlx4_slave_exit(dev); 2358 else 2359 mlx4_CLOSE_HCA(dev, 0); 2360 2361 err_free_icm: 2362 if (!mlx4_is_slave(dev)) 2363 mlx4_free_icms(dev); 2364 2365 return err; 2366 } 2367 2368 static int mlx4_init_counters_table(struct mlx4_dev *dev) 2369 { 2370 struct mlx4_priv *priv = mlx4_priv(dev); 2371 int nent_pow2; 2372 2373 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2374 return -ENOENT; 2375 2376 if (!dev->caps.max_counters) 2377 return -ENOSPC; 2378 2379 nent_pow2 = roundup_pow_of_two(dev->caps.max_counters); 2380 /* reserve last counter index for sink counter */ 2381 return mlx4_bitmap_init(&priv->counters_bitmap, nent_pow2, 2382 nent_pow2 - 1, 0, 2383 nent_pow2 - dev->caps.max_counters + 1); 2384 } 2385 2386 static void mlx4_cleanup_counters_table(struct mlx4_dev *dev) 2387 { 2388 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2389 return; 2390 2391 if (!dev->caps.max_counters) 2392 return; 2393 2394 mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap); 2395 } 2396 2397 static void mlx4_cleanup_default_counters(struct mlx4_dev *dev) 2398 { 2399 struct mlx4_priv *priv = mlx4_priv(dev); 2400 int port; 2401 2402 for (port = 0; port < dev->caps.num_ports; port++) 2403 if (priv->def_counter[port] != -1) 2404 mlx4_counter_free(dev, priv->def_counter[port]); 2405 } 2406 2407 static int mlx4_allocate_default_counters(struct 
mlx4_dev *dev) 2408 { 2409 struct mlx4_priv *priv = mlx4_priv(dev); 2410 int port, err = 0; 2411 u32 idx; 2412 2413 for (port = 0; port < dev->caps.num_ports; port++) 2414 priv->def_counter[port] = -1; 2415 2416 for (port = 0; port < dev->caps.num_ports; port++) { 2417 err = mlx4_counter_alloc(dev, &idx); 2418 2419 if (!err || err == -ENOSPC) { 2420 priv->def_counter[port] = idx; 2421 } else if (err == -ENOENT) { 2422 err = 0; 2423 continue; 2424 } else if (mlx4_is_slave(dev) && err == -EINVAL) { 2425 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev); 2426 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n", 2427 MLX4_SINK_COUNTER_INDEX(dev)); 2428 err = 0; 2429 } else { 2430 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2431 __func__, port + 1, err); 2432 mlx4_cleanup_default_counters(dev); 2433 return err; 2434 } 2435 2436 mlx4_dbg(dev, "%s: default counter index %d for port %d\n", 2437 __func__, priv->def_counter[port], port + 1); 2438 } 2439 2440 return err; 2441 } 2442 2443 int __mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2444 { 2445 struct mlx4_priv *priv = mlx4_priv(dev); 2446 2447 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2448 return -ENOENT; 2449 2450 *idx = mlx4_bitmap_alloc(&priv->counters_bitmap); 2451 if (*idx == -1) { 2452 *idx = MLX4_SINK_COUNTER_INDEX(dev); 2453 return -ENOSPC; 2454 } 2455 2456 return 0; 2457 } 2458 2459 int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx) 2460 { 2461 u64 out_param; 2462 int err; 2463 2464 if (mlx4_is_mfunc(dev)) { 2465 err = mlx4_cmd_imm(dev, 0, &out_param, RES_COUNTER, 2466 RES_OP_RESERVE, MLX4_CMD_ALLOC_RES, 2467 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); 2468 if (!err) 2469 *idx = get_param_l(&out_param); 2470 2471 return err; 2472 } 2473 return __mlx4_counter_alloc(dev, idx); 2474 } 2475 EXPORT_SYMBOL_GPL(mlx4_counter_alloc); 2476 2477 static int __mlx4_clear_if_stat(struct mlx4_dev *dev, 2478 u8 counter_index) 2479 { 2480 struct mlx4_cmd_mailbox *if_stat_mailbox; 2481 int err; 2482 u32 if_stat_in_mod = (counter_index & 0xff) | MLX4_QUERY_IF_STAT_RESET; 2483 2484 if_stat_mailbox = mlx4_alloc_cmd_mailbox(dev); 2485 if (IS_ERR(if_stat_mailbox)) 2486 return PTR_ERR(if_stat_mailbox); 2487 2488 err = mlx4_cmd_box(dev, 0, if_stat_mailbox->dma, if_stat_in_mod, 0, 2489 MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C, 2490 MLX4_CMD_NATIVE); 2491 2492 mlx4_free_cmd_mailbox(dev, if_stat_mailbox); 2493 return err; 2494 } 2495 2496 void __mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2497 { 2498 if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)) 2499 return; 2500 2501 if (idx == MLX4_SINK_COUNTER_INDEX(dev)) 2502 return; 2503 2504 __mlx4_clear_if_stat(dev, idx); 2505 2506 mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx, MLX4_USE_RR); 2507 return; 2508 } 2509 2510 void mlx4_counter_free(struct mlx4_dev *dev, u32 idx) 2511 { 2512 u64 in_param = 0; 2513 2514 if (mlx4_is_mfunc(dev)) { 2515 set_param_l(&in_param, idx); 2516 mlx4_cmd(dev, in_param, RES_COUNTER, RES_OP_RESERVE, 2517 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A, 2518 MLX4_CMD_WRAPPED); 2519 return; 2520 } 2521 __mlx4_counter_free(dev, idx); 2522 } 2523 EXPORT_SYMBOL_GPL(mlx4_counter_free); 2524 2525 int mlx4_get_default_counter_index(struct mlx4_dev *dev, int port) 2526 { 2527 struct mlx4_priv *priv = mlx4_priv(dev); 2528 2529 return priv->def_counter[port - 1]; 2530 } 2531 EXPORT_SYMBOL_GPL(mlx4_get_default_counter_index); 2532 2533 void mlx4_set_admin_guid(struct mlx4_dev *dev, __be64 guid, int entry, int 
port) 2534 { 2535 struct mlx4_priv *priv = mlx4_priv(dev); 2536 2537 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2538 } 2539 EXPORT_SYMBOL_GPL(mlx4_set_admin_guid); 2540 2541 __be64 mlx4_get_admin_guid(struct mlx4_dev *dev, int entry, int port) 2542 { 2543 struct mlx4_priv *priv = mlx4_priv(dev); 2544 2545 return priv->mfunc.master.vf_admin[entry].vport[port].guid; 2546 } 2547 EXPORT_SYMBOL_GPL(mlx4_get_admin_guid); 2548 2549 void mlx4_set_random_admin_guid(struct mlx4_dev *dev, int entry, int port) 2550 { 2551 struct mlx4_priv *priv = mlx4_priv(dev); 2552 __be64 guid; 2553 2554 /* hw GUID */ 2555 if (entry == 0) 2556 return; 2557 2558 get_random_bytes((char *)&guid, sizeof(guid)); 2559 guid &= ~(cpu_to_be64(1ULL << 56)); 2560 guid |= cpu_to_be64(1ULL << 57); 2561 priv->mfunc.master.vf_admin[entry].vport[port].guid = guid; 2562 } 2563 2564 static int mlx4_setup_hca(struct mlx4_dev *dev) 2565 { 2566 struct mlx4_priv *priv = mlx4_priv(dev); 2567 int err; 2568 int port; 2569 __be32 ib_port_default_caps; 2570 2571 err = mlx4_init_uar_table(dev); 2572 if (err) { 2573 mlx4_err(dev, "Failed to initialize user access region table, aborting\n"); 2574 return err; 2575 } 2576 2577 err = mlx4_uar_alloc(dev, &priv->driver_uar); 2578 if (err) { 2579 mlx4_err(dev, "Failed to allocate driver access region, aborting\n"); 2580 goto err_uar_table_free; 2581 } 2582 2583 priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE); 2584 if (!priv->kar) { 2585 mlx4_err(dev, "Couldn't map kernel access region, aborting\n"); 2586 err = -ENOMEM; 2587 goto err_uar_free; 2588 } 2589 2590 err = mlx4_init_pd_table(dev); 2591 if (err) { 2592 mlx4_err(dev, "Failed to initialize protection domain table, aborting\n"); 2593 goto err_kar_unmap; 2594 } 2595 2596 err = mlx4_init_xrcd_table(dev); 2597 if (err) { 2598 mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n"); 2599 goto err_pd_table_free; 2600 } 2601 2602 err = mlx4_init_mr_table(dev); 2603 if (err) { 2604 mlx4_err(dev, "Failed to initialize memory region table, aborting\n"); 2605 goto err_xrcd_table_free; 2606 } 2607 2608 if (!mlx4_is_slave(dev)) { 2609 err = mlx4_init_mcg_table(dev); 2610 if (err) { 2611 mlx4_err(dev, "Failed to initialize multicast group table, aborting\n"); 2612 goto err_mr_table_free; 2613 } 2614 err = mlx4_config_mad_demux(dev); 2615 if (err) { 2616 mlx4_err(dev, "Failed in config_mad_demux, aborting\n"); 2617 goto err_mcg_table_free; 2618 } 2619 } 2620 2621 err = mlx4_init_eq_table(dev); 2622 if (err) { 2623 mlx4_err(dev, "Failed to initialize event queue table, aborting\n"); 2624 goto err_mcg_table_free; 2625 } 2626 2627 err = mlx4_cmd_use_events(dev); 2628 if (err) { 2629 mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n"); 2630 goto err_eq_table_free; 2631 } 2632 2633 err = mlx4_NOP(dev); 2634 if (err) { 2635 if (dev->flags & MLX4_FLAG_MSI_X) { 2636 mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n", 2637 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2638 mlx4_warn(dev, "Trying again without MSI-X\n"); 2639 } else { 2640 mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n", 2641 priv->eq_table.eq[MLX4_EQ_ASYNC].irq); 2642 mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n"); 2643 } 2644 2645 goto err_cmd_poll; 2646 } 2647 2648 mlx4_dbg(dev, "NOP command IRQ test passed\n"); 2649 2650 err = mlx4_init_cq_table(dev); 2651 if (err) { 2652 mlx4_err(dev, "Failed to initialize completion queue table, 
aborting\n"); 2653 goto err_cmd_poll; 2654 } 2655 2656 err = mlx4_init_srq_table(dev); 2657 if (err) { 2658 mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n"); 2659 goto err_cq_table_free; 2660 } 2661 2662 err = mlx4_init_qp_table(dev); 2663 if (err) { 2664 mlx4_err(dev, "Failed to initialize queue pair table, aborting\n"); 2665 goto err_srq_table_free; 2666 } 2667 2668 if (!mlx4_is_slave(dev)) { 2669 err = mlx4_init_counters_table(dev); 2670 if (err && err != -ENOENT) { 2671 mlx4_err(dev, "Failed to initialize counters table, aborting\n"); 2672 goto err_qp_table_free; 2673 } 2674 } 2675 2676 err = mlx4_allocate_default_counters(dev); 2677 if (err) { 2678 mlx4_err(dev, "Failed to allocate default counters, aborting\n"); 2679 goto err_counters_table_free; 2680 } 2681 2682 if (!mlx4_is_slave(dev)) { 2683 for (port = 1; port <= dev->caps.num_ports; port++) { 2684 ib_port_default_caps = 0; 2685 err = mlx4_get_port_ib_caps(dev, port, 2686 &ib_port_default_caps); 2687 if (err) 2688 mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n", 2689 port, err); 2690 dev->caps.ib_port_def_cap[port] = ib_port_default_caps; 2691 2692 /* initialize per-slave default ib port capabilities */ 2693 if (mlx4_is_master(dev)) { 2694 int i; 2695 for (i = 0; i < dev->num_slaves; i++) { 2696 if (i == mlx4_master_func_num(dev)) 2697 continue; 2698 priv->mfunc.master.slave_state[i].ib_cap_mask[port] = 2699 ib_port_default_caps; 2700 } 2701 } 2702 2703 if (mlx4_is_mfunc(dev)) 2704 dev->caps.port_ib_mtu[port] = IB_MTU_2048; 2705 else 2706 dev->caps.port_ib_mtu[port] = IB_MTU_4096; 2707 2708 err = mlx4_SET_PORT(dev, port, mlx4_is_master(dev) ? 2709 dev->caps.pkey_table_len[port] : -1); 2710 if (err) { 2711 mlx4_err(dev, "Failed to set port %d, aborting\n", 2712 port); 2713 goto err_default_countes_free; 2714 } 2715 } 2716 } 2717 2718 return 0; 2719 2720 err_default_countes_free: 2721 mlx4_cleanup_default_counters(dev); 2722 2723 err_counters_table_free: 2724 if (!mlx4_is_slave(dev)) 2725 mlx4_cleanup_counters_table(dev); 2726 2727 err_qp_table_free: 2728 mlx4_cleanup_qp_table(dev); 2729 2730 err_srq_table_free: 2731 mlx4_cleanup_srq_table(dev); 2732 2733 err_cq_table_free: 2734 mlx4_cleanup_cq_table(dev); 2735 2736 err_cmd_poll: 2737 mlx4_cmd_use_polling(dev); 2738 2739 err_eq_table_free: 2740 mlx4_cleanup_eq_table(dev); 2741 2742 err_mcg_table_free: 2743 if (!mlx4_is_slave(dev)) 2744 mlx4_cleanup_mcg_table(dev); 2745 2746 err_mr_table_free: 2747 mlx4_cleanup_mr_table(dev); 2748 2749 err_xrcd_table_free: 2750 mlx4_cleanup_xrcd_table(dev); 2751 2752 err_pd_table_free: 2753 mlx4_cleanup_pd_table(dev); 2754 2755 err_kar_unmap: 2756 iounmap(priv->kar); 2757 2758 err_uar_free: 2759 mlx4_uar_free(dev, &priv->driver_uar); 2760 2761 err_uar_table_free: 2762 mlx4_cleanup_uar_table(dev); 2763 return err; 2764 } 2765 2766 static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn) 2767 { 2768 int requested_cpu = 0; 2769 struct mlx4_priv *priv = mlx4_priv(dev); 2770 struct mlx4_eq *eq; 2771 int off = 0; 2772 int i; 2773 2774 if (eqn > dev->caps.num_comp_vectors) 2775 return -EINVAL; 2776 2777 for (i = 1; i < port; i++) 2778 off += mlx4_get_eqs_per_port(dev, i); 2779 2780 requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC); 2781 2782 /* Meaning EQs are shared, and this call comes from the second port */ 2783 if (requested_cpu < 0) 2784 return 0; 2785 2786 eq = &priv->eq_table.eq[eqn]; 2787 2788 if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL)) 
2789 return -ENOMEM; 2790 2791 cpumask_set_cpu(requested_cpu, eq->affinity_mask); 2792 2793 return 0; 2794 } 2795 2796 static void mlx4_enable_msi_x(struct mlx4_dev *dev) 2797 { 2798 struct mlx4_priv *priv = mlx4_priv(dev); 2799 struct msix_entry *entries; 2800 int i; 2801 int port = 0; 2802 2803 if (msi_x) { 2804 int nreq = dev->caps.num_ports * num_online_cpus() + 1; 2805 2806 nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, 2807 nreq); 2808 if (nreq > MAX_MSIX) 2809 nreq = MAX_MSIX; 2810 2811 entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); 2812 if (!entries) 2813 goto no_msi; 2814 2815 for (i = 0; i < nreq; ++i) 2816 entries[i].entry = i; 2817 2818 nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2, 2819 nreq); 2820 2821 if (nreq < 0 || nreq < MLX4_EQ_ASYNC) { 2822 kfree(entries); 2823 goto no_msi; 2824 } 2825 /* 1 is reserved for events (asynchronous EQ) */ 2826 dev->caps.num_comp_vectors = nreq - 1; 2827 2828 priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector; 2829 bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, 2830 dev->caps.num_ports); 2831 2832 for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { 2833 if (i == MLX4_EQ_ASYNC) 2834 continue; 2835 2836 priv->eq_table.eq[i].irq = 2837 entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; 2838 2839 if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { 2840 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2841 dev->caps.num_ports); 2842 /* We don't set affinity hint when there 2843 * aren't enough EQs 2844 */ 2845 } else { 2846 set_bit(port, 2847 priv->eq_table.eq[i].actv_ports.ports); 2848 if (mlx4_init_affinity_hint(dev, port + 1, i)) 2849 mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n", 2850 i); 2851 } 2852 /* We divide the EQs evenly between the two ports. 2853 * (dev->caps.num_comp_vectors / dev->caps.num_ports) 2854 * refers to the number of EQs per port 2855 * (i.e. eqs_per_port). Theoretically, we would like to 2856 * write something like (i + 1) % eqs_per_port == 0. 2857 * However, since there's an asynchronous EQ, we have 2858 * to skip over it by comparing this condition to 2859 * !!((i + 1) > MLX4_EQ_ASYNC). 2860 */ 2861 if ((dev->caps.num_comp_vectors > dev->caps.num_ports) && 2862 ((i + 1) % 2863 (dev->caps.num_comp_vectors / dev->caps.num_ports)) == 2864 !!((i + 1) > MLX4_EQ_ASYNC)) 2865 /* If dev->caps.num_comp_vectors < dev->caps.num_ports, 2866 * everything is shared anyway.
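 * As a hypothetical illustration: with 8 completion vectors on a
 * 2-port device, eqs_per_port is 4, so the port index is advanced
 * every 4 completion EQs and each port ends up with its own half of
 * the vectors.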
2867 */ 2868 port++; 2869 } 2870 2871 dev->flags |= MLX4_FLAG_MSI_X; 2872 2873 kfree(entries); 2874 return; 2875 } 2876 2877 no_msi: 2878 dev->caps.num_comp_vectors = 1; 2879 2880 BUG_ON(MLX4_EQ_ASYNC >= 2); 2881 for (i = 0; i < 2; ++i) { 2882 priv->eq_table.eq[i].irq = dev->persist->pdev->irq; 2883 if (i != MLX4_EQ_ASYNC) { 2884 bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, 2885 dev->caps.num_ports); 2886 } 2887 } 2888 } 2889 2890 static int mlx4_init_port_info(struct mlx4_dev *dev, int port) 2891 { 2892 struct mlx4_port_info *info = &mlx4_priv(dev)->port[port]; 2893 int err = 0; 2894 2895 info->dev = dev; 2896 info->port = port; 2897 if (!mlx4_is_slave(dev)) { 2898 mlx4_init_mac_table(dev, &info->mac_table); 2899 mlx4_init_vlan_table(dev, &info->vlan_table); 2900 mlx4_init_roce_gid_table(dev, &info->gid_table); 2901 info->base_qpn = mlx4_get_base_qpn(dev, port); 2902 } 2903 2904 sprintf(info->dev_name, "mlx4_port%d", port); 2905 info->port_attr.attr.name = info->dev_name; 2906 if (mlx4_is_mfunc(dev)) 2907 info->port_attr.attr.mode = S_IRUGO; 2908 else { 2909 info->port_attr.attr.mode = S_IRUGO | S_IWUSR; 2910 info->port_attr.store = set_port_type; 2911 } 2912 info->port_attr.show = show_port_type; 2913 sysfs_attr_init(&info->port_attr.attr); 2914 2915 err = device_create_file(&dev->persist->pdev->dev, &info->port_attr); 2916 if (err) { 2917 mlx4_err(dev, "Failed to create file for port %d\n", port); 2918 info->port = -1; 2919 } 2920 2921 sprintf(info->dev_mtu_name, "mlx4_port%d_mtu", port); 2922 info->port_mtu_attr.attr.name = info->dev_mtu_name; 2923 if (mlx4_is_mfunc(dev)) 2924 info->port_mtu_attr.attr.mode = S_IRUGO; 2925 else { 2926 info->port_mtu_attr.attr.mode = S_IRUGO | S_IWUSR; 2927 info->port_mtu_attr.store = set_port_ib_mtu; 2928 } 2929 info->port_mtu_attr.show = show_port_ib_mtu; 2930 sysfs_attr_init(&info->port_mtu_attr.attr); 2931 2932 err = device_create_file(&dev->persist->pdev->dev, 2933 &info->port_mtu_attr); 2934 if (err) { 2935 mlx4_err(dev, "Failed to create mtu file for port %d\n", port); 2936 device_remove_file(&info->dev->persist->pdev->dev, 2937 &info->port_attr); 2938 info->port = -1; 2939 } 2940 2941 return err; 2942 } 2943 2944 static void mlx4_cleanup_port_info(struct mlx4_port_info *info) 2945 { 2946 if (info->port < 0) 2947 return; 2948 2949 device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr); 2950 device_remove_file(&info->dev->persist->pdev->dev, 2951 &info->port_mtu_attr); 2952 #ifdef CONFIG_RFS_ACCEL 2953 free_irq_cpu_rmap(info->rmap); 2954 info->rmap = NULL; 2955 #endif 2956 } 2957 2958 static int mlx4_init_steering(struct mlx4_dev *dev) 2959 { 2960 struct mlx4_priv *priv = mlx4_priv(dev); 2961 int num_entries = dev->caps.num_ports; 2962 int i, j; 2963 2964 priv->steer = kzalloc(sizeof(struct mlx4_steer) * num_entries, GFP_KERNEL); 2965 if (!priv->steer) 2966 return -ENOMEM; 2967 2968 for (i = 0; i < num_entries; i++) 2969 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2970 INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]); 2971 INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]); 2972 } 2973 return 0; 2974 } 2975 2976 static void mlx4_clear_steering(struct mlx4_dev *dev) 2977 { 2978 struct mlx4_priv *priv = mlx4_priv(dev); 2979 struct mlx4_steer_index *entry, *tmp_entry; 2980 struct mlx4_promisc_qp *pqp, *tmp_pqp; 2981 int num_entries = dev->caps.num_ports; 2982 int i, j; 2983 2984 for (i = 0; i < num_entries; i++) { 2985 for (j = 0; j < MLX4_NUM_STEERS; j++) { 2986 list_for_each_entry_safe(pqp, tmp_pqp, 2987 &priv->steer[i].promisc_qps[j], 
2988 list) { 2989 list_del(&pqp->list); 2990 kfree(pqp); 2991 } 2992 list_for_each_entry_safe(entry, tmp_entry, 2993 &priv->steer[i].steer_entries[j], 2994 list) { 2995 list_del(&entry->list); 2996 list_for_each_entry_safe(pqp, tmp_pqp, 2997 &entry->duplicates, 2998 list) { 2999 list_del(&pqp->list); 3000 kfree(pqp); 3001 } 3002 kfree(entry); 3003 } 3004 } 3005 } 3006 kfree(priv->steer); 3007 } 3008 3009 static int extended_func_num(struct pci_dev *pdev) 3010 { 3011 return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn); 3012 } 3013 3014 #define MLX4_OWNER_BASE 0x8069c 3015 #define MLX4_OWNER_SIZE 4 3016 3017 static int mlx4_get_ownership(struct mlx4_dev *dev) 3018 { 3019 void __iomem *owner; 3020 u32 ret; 3021 3022 if (pci_channel_offline(dev->persist->pdev)) 3023 return -EIO; 3024 3025 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3026 MLX4_OWNER_BASE, 3027 MLX4_OWNER_SIZE); 3028 if (!owner) { 3029 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3030 return -ENOMEM; 3031 } 3032 3033 ret = readl(owner); 3034 iounmap(owner); 3035 return (int) !!ret; 3036 } 3037 3038 static void mlx4_free_ownership(struct mlx4_dev *dev) 3039 { 3040 void __iomem *owner; 3041 3042 if (pci_channel_offline(dev->persist->pdev)) 3043 return; 3044 3045 owner = ioremap(pci_resource_start(dev->persist->pdev, 0) + 3046 MLX4_OWNER_BASE, 3047 MLX4_OWNER_SIZE); 3048 if (!owner) { 3049 mlx4_err(dev, "Failed to obtain ownership bit\n"); 3050 return; 3051 } 3052 writel(0, owner); 3053 msleep(1000); 3054 iounmap(owner); 3055 } 3056 3057 #define SRIOV_VALID_STATE(flags) (!!((flags) & MLX4_FLAG_SRIOV) ==\ 3058 !!((flags) & MLX4_FLAG_MASTER)) 3059 3060 static u64 mlx4_enable_sriov(struct mlx4_dev *dev, struct pci_dev *pdev, 3061 u8 total_vfs, int existing_vfs, int reset_flow) 3062 { 3063 u64 dev_flags = dev->flags; 3064 int err = 0; 3065 int fw_enabled_sriov_vfs = min(pci_sriov_get_totalvfs(pdev), 3066 MLX4_MAX_NUM_VF); 3067 3068 if (reset_flow) { 3069 dev->dev_vfs = kcalloc(total_vfs, sizeof(*dev->dev_vfs), 3070 GFP_KERNEL); 3071 if (!dev->dev_vfs) 3072 goto free_mem; 3073 return dev_flags; 3074 } 3075 3076 atomic_inc(&pf_loading); 3077 if (dev->flags & MLX4_FLAG_SRIOV) { 3078 if (existing_vfs != total_vfs) { 3079 mlx4_err(dev, "SR-IOV was already enabled, but with num_vfs (%d) different than requested (%d)\n", 3080 existing_vfs, total_vfs); 3081 total_vfs = existing_vfs; 3082 } 3083 } 3084 3085 dev->dev_vfs = kzalloc(total_vfs * sizeof(*dev->dev_vfs), GFP_KERNEL); 3086 if (NULL == dev->dev_vfs) { 3087 mlx4_err(dev, "Failed to allocate memory for VFs\n"); 3088 goto disable_sriov; 3089 } 3090 3091 if (!(dev->flags & MLX4_FLAG_SRIOV)) { 3092 if (total_vfs > fw_enabled_sriov_vfs) { 3093 mlx4_err(dev, "requested vfs (%d) > available vfs (%d). 
Continuing without SR_IOV\n", 3094 total_vfs, fw_enabled_sriov_vfs); 3095 err = -ENOMEM; 3096 goto disable_sriov; 3097 } 3098 mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", total_vfs); 3099 err = pci_enable_sriov(pdev, total_vfs); 3100 } 3101 if (err) { 3102 mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n", 3103 err); 3104 goto disable_sriov; 3105 } else { 3106 mlx4_warn(dev, "Running in master mode\n"); 3107 dev_flags |= MLX4_FLAG_SRIOV | 3108 MLX4_FLAG_MASTER; 3109 dev_flags &= ~MLX4_FLAG_SLAVE; 3110 dev->persist->num_vfs = total_vfs; 3111 } 3112 return dev_flags; 3113 3114 disable_sriov: 3115 atomic_dec(&pf_loading); 3116 free_mem: 3117 dev->persist->num_vfs = 0; 3118 kfree(dev->dev_vfs); 3119 dev->dev_vfs = NULL; 3120 return dev_flags & ~MLX4_FLAG_MASTER; 3121 } 3122 3123 enum { 3124 MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64 = -1, 3125 }; 3126 3127 static int mlx4_check_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, 3128 int *nvfs) 3129 { 3130 int requested_vfs = nvfs[0] + nvfs[1] + nvfs[2]; 3131 /* Checking for 64 VFs as a limitation of CX2 */ 3132 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_80_VFS) && 3133 requested_vfs >= 64) { 3134 mlx4_err(dev, "Requested %d VFs, but FW does not support more than 64\n", 3135 requested_vfs); 3136 return MLX4_DEV_CAP_CHECK_NUM_VFS_ABOVE_64; 3137 } 3138 return 0; 3139 } 3140 3141 static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, 3142 int total_vfs, int *nvfs, struct mlx4_priv *priv, 3143 int reset_flow) 3144 { 3145 struct mlx4_dev *dev; 3146 unsigned sum = 0; 3147 int err; 3148 int port; 3149 int i; 3150 struct mlx4_dev_cap *dev_cap = NULL; 3151 int existing_vfs = 0; 3152 3153 dev = &priv->dev; 3154 3155 INIT_LIST_HEAD(&priv->ctx_list); 3156 spin_lock_init(&priv->ctx_lock); 3157 3158 mutex_init(&priv->port_mutex); 3159 mutex_init(&priv->bond_mutex); 3160 3161 INIT_LIST_HEAD(&priv->pgdir_list); 3162 mutex_init(&priv->pgdir_mutex); 3163 3164 INIT_LIST_HEAD(&priv->bf_list); 3165 mutex_init(&priv->bf_mutex); 3166 3167 dev->rev_id = pdev->revision; 3168 dev->numa_node = dev_to_node(&pdev->dev); 3169 3170 /* Detect if this device is a virtual function */ 3171 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3172 mlx4_warn(dev, "Detected virtual function - running in slave mode\n"); 3173 dev->flags |= MLX4_FLAG_SLAVE; 3174 } else { 3175 /* We reset the device and enable SRIOV only for physical 3176 * devices. Try to claim ownership on the device; 3177 * if already taken, skip -- do not allow multiple PFs */ 3178 err = mlx4_get_ownership(dev); 3179 if (err) { 3180 if (err < 0) 3181 return err; 3182 else { 3183 mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n"); 3184 return -EINVAL; 3185 } 3186 } 3187 3188 atomic_set(&priv->opreq_count, 0); 3189 INIT_WORK(&priv->opreq_task, mlx4_opreq_action); 3190 3191 /* 3192 * Now reset the HCA before we touch the PCI capabilities or 3193 * attempt a firmware command, since a boot ROM may have left 3194 * the HCA in an undefined state. 3195 */ 3196 err = mlx4_reset(dev); 3197 if (err) { 3198 mlx4_err(dev, "Failed to reset HCA, aborting\n"); 3199 goto err_sriov; 3200 } 3201 3202 if (total_vfs) { 3203 dev->flags = MLX4_FLAG_MASTER; 3204 existing_vfs = pci_num_vf(pdev); 3205 if (existing_vfs) 3206 dev->flags |= MLX4_FLAG_SRIOV; 3207 dev->persist->num_vfs = total_vfs; 3208 } 3209 } 3210 3211 /* on load remove any previous indication of internal error, 3212 * device is up. 
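 * Assigning MLX4_DEVICE_STATE_UP below overwrites any internal-error
 * state a previous run may have recorded in dev->persist->state.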
3213 */ 3214 dev->persist->state = MLX4_DEVICE_STATE_UP; 3215 3216 slave_start: 3217 err = mlx4_cmd_init(dev); 3218 if (err) { 3219 mlx4_err(dev, "Failed to init command interface, aborting\n"); 3220 goto err_sriov; 3221 } 3222 3223 /* In slave functions, the communication channel must be initialized 3224 * before posting commands. Also, init num_slaves before calling 3225 * mlx4_init_hca */ 3226 if (mlx4_is_mfunc(dev)) { 3227 if (mlx4_is_master(dev)) { 3228 dev->num_slaves = MLX4_MAX_NUM_SLAVES; 3229 3230 } else { 3231 dev->num_slaves = 0; 3232 err = mlx4_multi_func_init(dev); 3233 if (err) { 3234 mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n"); 3235 goto err_cmd; 3236 } 3237 } 3238 } 3239 3240 err = mlx4_init_fw(dev); 3241 if (err) { 3242 mlx4_err(dev, "Failed to init fw, aborting.\n"); 3243 goto err_mfunc; 3244 } 3245 3246 if (mlx4_is_master(dev)) { 3247 /* when we hit the goto slave_start below, dev_cap already initialized */ 3248 if (!dev_cap) { 3249 dev_cap = kzalloc(sizeof(*dev_cap), GFP_KERNEL); 3250 3251 if (!dev_cap) { 3252 err = -ENOMEM; 3253 goto err_fw; 3254 } 3255 3256 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3257 if (err) { 3258 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3259 goto err_fw; 3260 } 3261 3262 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3263 goto err_fw; 3264 3265 if (!(dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3266 u64 dev_flags = mlx4_enable_sriov(dev, pdev, 3267 total_vfs, 3268 existing_vfs, 3269 reset_flow); 3270 3271 mlx4_close_fw(dev); 3272 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3273 dev->flags = dev_flags; 3274 if (!SRIOV_VALID_STATE(dev->flags)) { 3275 mlx4_err(dev, "Invalid SRIOV state\n"); 3276 goto err_sriov; 3277 } 3278 err = mlx4_reset(dev); 3279 if (err) { 3280 mlx4_err(dev, "Failed to reset HCA, aborting.\n"); 3281 goto err_sriov; 3282 } 3283 goto slave_start; 3284 } 3285 } else { 3286 /* Legacy mode FW requires SRIOV to be enabled before 3287 * doing QUERY_DEV_CAP, since max_eq's value is different if 3288 * SRIOV is enabled. 
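 * This branch runs on the second pass through slave_start, i.e. after
 * mlx4_enable_sriov() and the HCA reset above, so DEV_CAP is queried
 * again with SR-IOV already turned on.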
3289 */ 3290 memset(dev_cap, 0, sizeof(*dev_cap)); 3291 err = mlx4_QUERY_DEV_CAP(dev, dev_cap); 3292 if (err) { 3293 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 3294 goto err_fw; 3295 } 3296 3297 if (mlx4_check_dev_cap(dev, dev_cap, nvfs)) 3298 goto err_fw; 3299 } 3300 } 3301 3302 err = mlx4_init_hca(dev); 3303 if (err) { 3304 if (err == -EACCES) { 3305 /* Not primary Physical function 3306 * Running in slave mode */ 3307 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3308 /* We're not a PF */ 3309 if (dev->flags & MLX4_FLAG_SRIOV) { 3310 if (!existing_vfs) 3311 pci_disable_sriov(pdev); 3312 if (mlx4_is_master(dev) && !reset_flow) 3313 atomic_dec(&pf_loading); 3314 dev->flags &= ~MLX4_FLAG_SRIOV; 3315 } 3316 if (!mlx4_is_slave(dev)) 3317 mlx4_free_ownership(dev); 3318 dev->flags |= MLX4_FLAG_SLAVE; 3319 dev->flags &= ~MLX4_FLAG_MASTER; 3320 goto slave_start; 3321 } else 3322 goto err_fw; 3323 } 3324 3325 if (mlx4_is_master(dev) && (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_SYS_EQS)) { 3326 u64 dev_flags = mlx4_enable_sriov(dev, pdev, total_vfs, 3327 existing_vfs, reset_flow); 3328 3329 if ((dev->flags ^ dev_flags) & (MLX4_FLAG_MASTER | MLX4_FLAG_SLAVE)) { 3330 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_VHCR); 3331 dev->flags = dev_flags; 3332 err = mlx4_cmd_init(dev); 3333 if (err) { 3334 /* Only VHCR is cleaned up, so could still 3335 * send FW commands 3336 */ 3337 mlx4_err(dev, "Failed to init VHCR command interface, aborting\n"); 3338 goto err_close; 3339 } 3340 } else { 3341 dev->flags = dev_flags; 3342 } 3343 3344 if (!SRIOV_VALID_STATE(dev->flags)) { 3345 mlx4_err(dev, "Invalid SRIOV state\n"); 3346 goto err_close; 3347 } 3348 } 3349 3350 /* check if the device is functioning at its maximum possible speed. 3351 * No return code for this call, just warn the user in case of PCI 3352 * express device capabilities are under-satisfied by the bus. 3353 */ 3354 if (!mlx4_is_slave(dev)) 3355 mlx4_check_pcie_caps(dev); 3356 3357 /* In master functions, the communication channel must be initialized 3358 * after obtaining its address from fw */ 3359 if (mlx4_is_master(dev)) { 3360 if (dev->caps.num_ports < 2 && 3361 num_vfs_argc > 1) { 3362 err = -EINVAL; 3363 mlx4_err(dev, 3364 "Error: Trying to configure VFs on port 2, but HCA has only %d physical ports\n", 3365 dev->caps.num_ports); 3366 goto err_close; 3367 } 3368 memcpy(dev->persist->nvfs, nvfs, sizeof(dev->persist->nvfs)); 3369 3370 for (i = 0; 3371 i < sizeof(dev->persist->nvfs)/ 3372 sizeof(dev->persist->nvfs[0]); i++) { 3373 unsigned j; 3374 3375 for (j = 0; j < dev->persist->nvfs[i]; ++sum, ++j) { 3376 dev->dev_vfs[sum].min_port = i < 2 ? i + 1 : 1; 3377 dev->dev_vfs[sum].n_ports = i < 2 ? 
1 : 3378 dev->caps.num_ports; 3379 } 3380 } 3381 3382 /* In master functions, the communication channel 3383 * must be initialized after obtaining its address from fw 3384 */ 3385 err = mlx4_multi_func_init(dev); 3386 if (err) { 3387 mlx4_err(dev, "Failed to init master mfunc interface, aborting.\n"); 3388 goto err_close; 3389 } 3390 } 3391 3392 err = mlx4_alloc_eq_table(dev); 3393 if (err) 3394 goto err_master_mfunc; 3395 3396 bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX); 3397 mutex_init(&priv->msix_ctl.pool_lock); 3398 3399 mlx4_enable_msi_x(dev); 3400 if ((mlx4_is_mfunc(dev)) && 3401 !(dev->flags & MLX4_FLAG_MSI_X)) { 3402 err = -ENOSYS; 3403 mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n"); 3404 goto err_free_eq; 3405 } 3406 3407 if (!mlx4_is_slave(dev)) { 3408 err = mlx4_init_steering(dev); 3409 if (err) 3410 goto err_disable_msix; 3411 } 3412 3413 err = mlx4_setup_hca(dev); 3414 if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) && 3415 !mlx4_is_mfunc(dev)) { 3416 dev->flags &= ~MLX4_FLAG_MSI_X; 3417 dev->caps.num_comp_vectors = 1; 3418 pci_disable_msix(pdev); 3419 err = mlx4_setup_hca(dev); 3420 } 3421 3422 if (err) 3423 goto err_steer; 3424 3425 mlx4_init_quotas(dev); 3426 /* When PF resources are ready arm its comm channel to enable 3427 * getting commands 3428 */ 3429 if (mlx4_is_master(dev)) { 3430 err = mlx4_ARM_COMM_CHANNEL(dev); 3431 if (err) { 3432 mlx4_err(dev, " Failed to arm comm channel eq: %x\n", 3433 err); 3434 goto err_steer; 3435 } 3436 } 3437 3438 for (port = 1; port <= dev->caps.num_ports; port++) { 3439 err = mlx4_init_port_info(dev, port); 3440 if (err) 3441 goto err_port; 3442 } 3443 3444 priv->v2p.port1 = 1; 3445 priv->v2p.port2 = 2; 3446 3447 err = mlx4_register_device(dev); 3448 if (err) 3449 goto err_port; 3450 3451 mlx4_request_modules(dev); 3452 3453 mlx4_sense_init(dev); 3454 mlx4_start_sense(dev); 3455 3456 priv->removed = 0; 3457 3458 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3459 atomic_dec(&pf_loading); 3460 3461 kfree(dev_cap); 3462 return 0; 3463 3464 err_port: 3465 for (--port; port >= 1; --port) 3466 mlx4_cleanup_port_info(&priv->port[port]); 3467 3468 mlx4_cleanup_default_counters(dev); 3469 if (!mlx4_is_slave(dev)) 3470 mlx4_cleanup_counters_table(dev); 3471 mlx4_cleanup_qp_table(dev); 3472 mlx4_cleanup_srq_table(dev); 3473 mlx4_cleanup_cq_table(dev); 3474 mlx4_cmd_use_polling(dev); 3475 mlx4_cleanup_eq_table(dev); 3476 mlx4_cleanup_mcg_table(dev); 3477 mlx4_cleanup_mr_table(dev); 3478 mlx4_cleanup_xrcd_table(dev); 3479 mlx4_cleanup_pd_table(dev); 3480 mlx4_cleanup_uar_table(dev); 3481 3482 err_steer: 3483 if (!mlx4_is_slave(dev)) 3484 mlx4_clear_steering(dev); 3485 3486 err_disable_msix: 3487 if (dev->flags & MLX4_FLAG_MSI_X) 3488 pci_disable_msix(pdev); 3489 3490 err_free_eq: 3491 mlx4_free_eq_table(dev); 3492 3493 err_master_mfunc: 3494 if (mlx4_is_master(dev)) { 3495 mlx4_free_resource_tracker(dev, RES_TR_FREE_STRUCTS_ONLY); 3496 mlx4_multi_func_cleanup(dev); 3497 } 3498 3499 if (mlx4_is_slave(dev)) { 3500 kfree(dev->caps.qp0_qkey); 3501 kfree(dev->caps.qp0_tunnel); 3502 kfree(dev->caps.qp0_proxy); 3503 kfree(dev->caps.qp1_tunnel); 3504 kfree(dev->caps.qp1_proxy); 3505 } 3506 3507 err_close: 3508 mlx4_close_hca(dev); 3509 3510 err_fw: 3511 mlx4_close_fw(dev); 3512 3513 err_mfunc: 3514 if (mlx4_is_slave(dev)) 3515 mlx4_multi_func_cleanup(dev); 3516 3517 err_cmd: 3518 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3519 3520 err_sriov: 3521 if (dev->flags & MLX4_FLAG_SRIOV && !existing_vfs) { 3522 
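/* Undo SR-IOV only if this probe enabled it; VFs that existed before
 * the probe (existing_vfs) are left configured.
 */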
pci_disable_sriov(pdev); 3523 dev->flags &= ~MLX4_FLAG_SRIOV; 3524 } 3525 3526 if (mlx4_is_master(dev) && dev->persist->num_vfs && !reset_flow) 3527 atomic_dec(&pf_loading); 3528 3529 kfree(priv->dev.dev_vfs); 3530 3531 if (!mlx4_is_slave(dev)) 3532 mlx4_free_ownership(dev); 3533 3534 kfree(dev_cap); 3535 return err; 3536 } 3537 3538 static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, 3539 struct mlx4_priv *priv) 3540 { 3541 int err; 3542 int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3543 int prb_vf[MLX4_MAX_PORTS + 1] = {0, 0, 0}; 3544 const int param_map[MLX4_MAX_PORTS + 1][MLX4_MAX_PORTS + 1] = { 3545 {2, 0, 0}, {0, 1, 2}, {0, 1, 2} }; 3546 unsigned total_vfs = 0; 3547 unsigned int i; 3548 3549 pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev)); 3550 3551 err = pci_enable_device(pdev); 3552 if (err) { 3553 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); 3554 return err; 3555 } 3556 3557 /* Due to the requirement that all VFs and the PF are *guaranteed* 2 MACs 3558 * per port, we must limit the number of VFs to 63 (since there are 3559 * 128 MACs) 3560 */ 3561 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && i < num_vfs_argc; 3562 total_vfs += nvfs[param_map[num_vfs_argc - 1][i]], i++) { 3563 nvfs[param_map[num_vfs_argc - 1][i]] = num_vfs[i]; 3564 if (nvfs[i] < 0) { 3565 dev_err(&pdev->dev, "num_vfs module parameter cannot be negative\n"); 3566 err = -EINVAL; 3567 goto err_disable_pdev; 3568 } 3569 } 3570 for (i = 0; i < sizeof(prb_vf)/sizeof(prb_vf[0]) && i < probe_vfs_argc; 3571 i++) { 3572 prb_vf[param_map[probe_vfs_argc - 1][i]] = probe_vf[i]; 3573 if (prb_vf[i] < 0 || prb_vf[i] > nvfs[i]) { 3574 dev_err(&pdev->dev, "probe_vf module parameter cannot be negative or greater than num_vfs\n"); 3575 err = -EINVAL; 3576 goto err_disable_pdev; 3577 } 3578 } 3579 if (total_vfs > MLX4_MAX_NUM_VF) { 3580 dev_err(&pdev->dev, 3581 "Requested more VFs (%d) than allowed by hw (%d)\n", 3582 total_vfs, MLX4_MAX_NUM_VF); 3583 err = -EINVAL; 3584 goto err_disable_pdev; 3585 } 3586 3587 for (i = 0; i < MLX4_MAX_PORTS; i++) { 3588 if (nvfs[i] + nvfs[2] > MLX4_MAX_NUM_VF_P_PORT) { 3589 dev_err(&pdev->dev, 3590 "Requested more VFs (%d) for port (%d) than allowed by driver (%d)\n", 3591 nvfs[i] + nvfs[2], i + 1, 3592 MLX4_MAX_NUM_VF_P_PORT); 3593 err = -EINVAL; 3594 goto err_disable_pdev; 3595 } 3596 } 3597 3598 /* Check for BARs.
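 * BAR 0 must be a memory BAR exposing the device control space (DCS),
 * a check that is skipped for virtual functions, and BAR 2 must expose
 * the UAR area.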
*/ 3599 if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) && 3600 !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3601 dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n", 3602 pci_dev_data, pci_resource_flags(pdev, 0)); 3603 err = -ENODEV; 3604 goto err_disable_pdev; 3605 } 3606 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 3607 dev_err(&pdev->dev, "Missing UAR, aborting\n"); 3608 err = -ENODEV; 3609 goto err_disable_pdev; 3610 } 3611 3612 err = pci_request_regions(pdev, DRV_NAME); 3613 if (err) { 3614 dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n"); 3615 goto err_disable_pdev; 3616 } 3617 3618 pci_set_master(pdev); 3619 3620 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 3621 if (err) { 3622 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); 3623 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3624 if (err) { 3625 dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); 3626 goto err_release_regions; 3627 } 3628 } 3629 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3630 if (err) { 3631 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); 3632 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3633 if (err) { 3634 dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); 3635 goto err_release_regions; 3636 } 3637 } 3638 3639 /* Allow large DMA segments, up to the firmware limit of 1 GB */ 3640 dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); 3641 /* Detect if this device is a virtual function */ 3642 if (pci_dev_data & MLX4_PCI_DEV_IS_VF) { 3643 /* When acting as pf, we normally skip vfs unless explicitly 3644 * requested to probe them. 3645 */ 3646 if (total_vfs) { 3647 unsigned vfs_offset = 0; 3648 3649 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) && 3650 vfs_offset + nvfs[i] < extended_func_num(pdev); 3651 vfs_offset += nvfs[i], i++) 3652 ; 3653 if (i == sizeof(nvfs)/sizeof(nvfs[0])) { 3654 err = -ENODEV; 3655 goto err_release_regions; 3656 } 3657 if ((extended_func_num(pdev) - vfs_offset) 3658 > prb_vf[i]) { 3659 dev_warn(&pdev->dev, "Skipping virtual function:%d\n", 3660 extended_func_num(pdev)); 3661 err = -ENODEV; 3662 goto err_release_regions; 3663 } 3664 } 3665 } 3666 3667 err = mlx4_catas_init(&priv->dev); 3668 if (err) 3669 goto err_release_regions; 3670 3671 err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 0); 3672 if (err) 3673 goto err_catas; 3674 3675 return 0; 3676 3677 err_catas: 3678 mlx4_catas_end(&priv->dev); 3679 3680 err_release_regions: 3681 pci_release_regions(pdev); 3682 3683 err_disable_pdev: 3684 pci_disable_device(pdev); 3685 pci_set_drvdata(pdev, NULL); 3686 return err; 3687 } 3688 3689 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) 3690 { 3691 struct mlx4_priv *priv; 3692 struct mlx4_dev *dev; 3693 int ret; 3694 3695 printk_once(KERN_INFO "%s", mlx4_version); 3696 3697 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 3698 if (!priv) 3699 return -ENOMEM; 3700 3701 dev = &priv->dev; 3702 dev->persist = kzalloc(sizeof(*dev->persist), GFP_KERNEL); 3703 if (!dev->persist) { 3704 kfree(priv); 3705 return -ENOMEM; 3706 } 3707 dev->persist->pdev = pdev; 3708 dev->persist->dev = dev; 3709 pci_set_drvdata(pdev, dev->persist); 3710 priv->pci_dev_data = id->driver_data; 3711 mutex_init(&dev->persist->device_state_mutex); 3712 mutex_init(&dev->persist->interface_state_mutex); 3713 3714 ret = __mlx4_init_one(pdev, id->driver_data, priv); 3715 if (ret) { 3716 kfree(dev->persist); 3717 
kfree(priv); 3718 } else { 3719 pci_save_state(pdev); 3720 } 3721 3722 return ret; 3723 } 3724 3725 static void mlx4_clean_dev(struct mlx4_dev *dev) 3726 { 3727 struct mlx4_dev_persistent *persist = dev->persist; 3728 struct mlx4_priv *priv = mlx4_priv(dev); 3729 unsigned long flags = (dev->flags & RESET_PERSIST_MASK_FLAGS); 3730 3731 memset(priv, 0, sizeof(*priv)); 3732 priv->dev.persist = persist; 3733 priv->dev.flags = flags; 3734 } 3735 3736 static void mlx4_unload_one(struct pci_dev *pdev) 3737 { 3738 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3739 struct mlx4_dev *dev = persist->dev; 3740 struct mlx4_priv *priv = mlx4_priv(dev); 3741 int pci_dev_data; 3742 int p, i; 3743 3744 if (priv->removed) 3745 return; 3746 3747 /* saving current ports type for further use */ 3748 for (i = 0; i < dev->caps.num_ports; i++) { 3749 dev->persist->curr_port_type[i] = dev->caps.port_type[i + 1]; 3750 dev->persist->curr_port_poss_type[i] = dev->caps. 3751 possible_type[i + 1]; 3752 } 3753 3754 pci_dev_data = priv->pci_dev_data; 3755 3756 mlx4_stop_sense(dev); 3757 mlx4_unregister_device(dev); 3758 3759 for (p = 1; p <= dev->caps.num_ports; p++) { 3760 mlx4_cleanup_port_info(&priv->port[p]); 3761 mlx4_CLOSE_PORT(dev, p); 3762 } 3763 3764 if (mlx4_is_master(dev)) 3765 mlx4_free_resource_tracker(dev, 3766 RES_TR_FREE_SLAVES_ONLY); 3767 3768 mlx4_cleanup_default_counters(dev); 3769 if (!mlx4_is_slave(dev)) 3770 mlx4_cleanup_counters_table(dev); 3771 mlx4_cleanup_qp_table(dev); 3772 mlx4_cleanup_srq_table(dev); 3773 mlx4_cleanup_cq_table(dev); 3774 mlx4_cmd_use_polling(dev); 3775 mlx4_cleanup_eq_table(dev); 3776 mlx4_cleanup_mcg_table(dev); 3777 mlx4_cleanup_mr_table(dev); 3778 mlx4_cleanup_xrcd_table(dev); 3779 mlx4_cleanup_pd_table(dev); 3780 3781 if (mlx4_is_master(dev)) 3782 mlx4_free_resource_tracker(dev, 3783 RES_TR_FREE_STRUCTS_ONLY); 3784 3785 iounmap(priv->kar); 3786 mlx4_uar_free(dev, &priv->driver_uar); 3787 mlx4_cleanup_uar_table(dev); 3788 if (!mlx4_is_slave(dev)) 3789 mlx4_clear_steering(dev); 3790 mlx4_free_eq_table(dev); 3791 if (mlx4_is_master(dev)) 3792 mlx4_multi_func_cleanup(dev); 3793 mlx4_close_hca(dev); 3794 mlx4_close_fw(dev); 3795 if (mlx4_is_slave(dev)) 3796 mlx4_multi_func_cleanup(dev); 3797 mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL); 3798 3799 if (dev->flags & MLX4_FLAG_MSI_X) 3800 pci_disable_msix(pdev); 3801 3802 if (!mlx4_is_slave(dev)) 3803 mlx4_free_ownership(dev); 3804 3805 kfree(dev->caps.qp0_qkey); 3806 kfree(dev->caps.qp0_tunnel); 3807 kfree(dev->caps.qp0_proxy); 3808 kfree(dev->caps.qp1_tunnel); 3809 kfree(dev->caps.qp1_proxy); 3810 kfree(dev->dev_vfs); 3811 3812 mlx4_clean_dev(dev); 3813 priv->pci_dev_data = pci_dev_data; 3814 priv->removed = 1; 3815 } 3816 3817 static void mlx4_remove_one(struct pci_dev *pdev) 3818 { 3819 struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev); 3820 struct mlx4_dev *dev = persist->dev; 3821 struct mlx4_priv *priv = mlx4_priv(dev); 3822 int active_vfs = 0; 3823 3824 mutex_lock(&persist->interface_state_mutex); 3825 persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; 3826 mutex_unlock(&persist->interface_state_mutex); 3827 3828 /* Disabling SR-IOV is not allowed while there are active vf's */ 3829 if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) { 3830 active_vfs = mlx4_how_many_lives_vf(dev); 3831 if (active_vfs) { 3832 pr_warn("Removing PF when there are active VF's !!\n"); 3833 pr_warn("Will not disable SR-IOV.\n"); 3834 } 3835 } 3836 3837 /* device marked to be under deletion running now without 
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int active_vfs = 0;

	mutex_lock(&persist->interface_state_mutex);
	persist->interface_state |= MLX4_INTERFACE_STATE_DELETION;
	mutex_unlock(&persist->interface_state_mutex);

	/* Disabling SR-IOV is not allowed while there are active VFs */
	if (mlx4_is_master(dev) && dev->flags & MLX4_FLAG_SRIOV) {
		active_vfs = mlx4_how_many_lives_vf(dev);
		if (active_vfs) {
			pr_warn("Removing PF when there are active VF's !!\n");
			pr_warn("Will not disable SR-IOV.\n");
		}
	}

	/* The device is now marked for deletion, so the rest of the teardown
	 * can run without the lock, letting other tasks terminate.
	 */
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	else
		mlx4_info(dev, "%s: interface is down\n", __func__);
	mlx4_catas_end(dev);
	if (dev->flags & MLX4_FLAG_SRIOV && !active_vfs) {
		mlx4_warn(dev, "Disabling SR-IOV\n");
		pci_disable_sriov(pdev);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	kfree(dev->persist);
	kfree(priv);
	pci_set_drvdata(pdev, NULL);
}

static int restore_current_port_types(struct mlx4_dev *dev,
				      enum mlx4_port_type *types,
				      enum mlx4_port_type *poss_types)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err, i;

	mlx4_stop_sense(dev);

	mutex_lock(&priv->port_mutex);
	for (i = 0; i < dev->caps.num_ports; i++)
		dev->caps.possible_type[i + 1] = poss_types[i];
	err = mlx4_change_port_types(dev, types);
	mlx4_start_sense(dev);
	mutex_unlock(&priv->port_mutex);

	return err;
}

int mlx4_restart_one(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int pci_dev_data, err, total_vfs;

	pci_dev_data = priv->pci_dev_data;
	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mlx4_unload_one(pdev);
	err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
	if (err) {
		mlx4_err(dev, "%s: ERROR: mlx4_load_one failed, pci_name=%s, err=%d\n",
			 __func__, pci_name(pdev), err);
		return err;
	}

	err = restore_current_port_types(dev, dev->persist->curr_port_type,
					 dev->persist->curr_port_poss_type);
	if (err)
		mlx4_err(dev, "could not restore original port types (%d)\n",
			 err);

	return err;
}
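
/* PCI device IDs served by this driver. The driver_data field carries
 * per-device flags (MLX4_PCI_DEV_FORCE_SENSE_PORT for the older ConnectX
 * parts, MLX4_PCI_DEV_IS_VF for virtual functions) that are handed to the
 * probe path, and MODULE_DEVICE_TABLE() below exports the table so that
 * userspace can autoload the module when a matching device appears.
 */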
static const struct pci_device_id mlx4_pci_table[] = {
	/* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x6340), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6732), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6368), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6750), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6372), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26468 ConnectX EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6764), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x6746), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x676e), MLX4_PCI_DEV_FORCE_SENSE_PORT },
	/* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
	/* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1003), 0 },
	/* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
	{ PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
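
/* PCI AER recovery flow, as implemented by the two handlers below:
 * .error_detected (mlx4_pci_err_detected) moves the device into its error
 * state, unloads it if the interface was up, and asks the PCI core for a
 * slot reset unless the failure is permanent. After the reset, .slot_reset
 * (mlx4_pci_slot_reset) re-enables the device, restores its PCI config
 * space, and reloads the driver with the SR-IOV settings that were in use
 * before the error, restoring the saved port types as well.
 */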
static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_err(persist->dev, "mlx4_pci_err_detected was called\n");
	mlx4_enter_error_state(persist);

	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
	struct mlx4_dev *dev = persist->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;
	int nvfs[MLX4_MAX_PORTS + 1] = {0, 0, 0};
	int total_vfs;

	mlx4_err(dev, "mlx4_pci_slot_reset was called\n");
	ret = pci_enable_device(pdev);
	if (ret) {
		mlx4_err(dev, "Can not re-enable device, ret=%d\n", ret);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	total_vfs = dev->persist->num_vfs;
	memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));

	mutex_lock(&persist->interface_state_mutex);
	if (!(persist->interface_state & MLX4_INTERFACE_STATE_UP)) {
		ret = mlx4_load_one(pdev, priv->pci_dev_data, total_vfs, nvfs,
				    priv, 1);
		if (ret) {
			mlx4_err(dev, "%s: mlx4_load_one failed, ret=%d\n",
				 __func__, ret);
			goto end;
		}

		ret = restore_current_port_types(dev,
						 dev->persist->curr_port_type,
						 dev->persist->curr_port_poss_type);
		if (ret)
			mlx4_err(dev, "could not restore original port types (%d)\n", ret);
	}
end:
	mutex_unlock(&persist->interface_state_mutex);

	return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

static void mlx4_shutdown(struct pci_dev *pdev)
{
	struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);

	mlx4_info(persist->dev, "mlx4_shutdown was called\n");
	mutex_lock(&persist->interface_state_mutex);
	if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
		mlx4_unload_one(pdev);
	mutex_unlock(&persist->interface_state_mutex);
}

static const struct pci_error_handlers mlx4_err_handler = {
	.error_detected	= mlx4_pci_err_detected,
	.slot_reset	= mlx4_pci_slot_reset,
};

static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.shutdown	= mlx4_shutdown,
	.remove		= mlx4_remove_one,
	.err_handler	= &mlx4_err_handler,
};

static int __init mlx4_verify_params(void)
{
	if ((log_num_mac < 0) || (log_num_mac > 7)) {
		pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
		return -1;
	}

	if (log_num_vlan != 0)
		pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
			MLX4_LOG_NUM_VLANS);

	if (use_prio != 0)
		pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");

	if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
		pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
			log_mtts_per_seg);
		return -1;
	}

	/* Check if module param for ports type has legal combination */
	if (port_type_array[0] == false && port_type_array[1] == true) {
		pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
		port_type_array[0] = true;
	}

	if (mlx4_log_num_mgm_entry_size < -7 ||
	    (mlx4_log_num_mgm_entry_size > 0 &&
	     (mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
	      mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE))) {
		pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-7..0 or %d..%d)\n",
			mlx4_log_num_mgm_entry_size,
			MLX4_MIN_MGM_LOG_ENTRY_SIZE,
			MLX4_MAX_MGM_LOG_ENTRY_SIZE);
		return -1;
	}

	return 0;
}

static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		destroy_workqueue(mlx4_wq);
	return ret < 0 ? ret : 0;
}

static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}

module_init(mlx4_init);
module_exit(mlx4_cleanup);
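
/* Illustrative load example (editorial addition): the parameter values are
 * arbitrary but fall inside the ranges enforced by mlx4_verify_params()
 * above, and the module name assumes the usual mlx4_core build:
 *
 *	modprobe mlx4_core log_num_mac=5 log_mtts_per_seg=3
 *
 * mlx4_verify_params() runs from mlx4_init() before the PCI driver is
 * registered, so an out-of-range value fails the module load with -EINVAL
 * instead of probing the device with a bad configuration.
 */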