// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}

static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};

static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};

static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

	map->cap_arr = NULL;
	map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}

static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	if (IS_PF(cdev) && !pdev->pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		DP_NOTICE(cdev, "Can't request DMA addresses\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
			dev_info->b_arfs_capable = true;
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
		dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

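	/* The PF path acquires a PTT window to query MFW/MBI versions and the
	 * flash size; the VF path only reads the MFW version, without a PTT.
	 */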
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;
	cdev->common_dev_info = *dev_info;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "%s completed successfully\n", __func__);

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++;	/* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "%s ERR\n", __func__);
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

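	/* Point the zlib stream at the zipped input and decompress directly
	 * into the caller-provided buffer.
	 */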
	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)

static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		/* skip qed_db_rec_handler during recovery/unload */
		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
			goto out;

		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

out:
	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
						    0, 0, cdev->pdev->bus->number,
						    PCI_SLOT(cdev->pdev->devfn),
						    hwfn->abs_pf_id);

		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
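		/* Mark the queue usable only after the work item is set up;
		 * qed_slowpath_delayed_work() checks this flag before queueing.
		 */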
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
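	/* Report the driver version string to the management FW */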
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strscpy(drv_version.name, params->name,
			sizeof(drv_version.name));
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

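	/* A PF programs the SB through a PTT window; a VF passes a NULL PTT */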
	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
			break;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
			break;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
			break;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
			break;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
			break;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
			break;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
			break;
		default:
			break;
		}
	}

	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_mcp_link_params *link_params;
	struct qed_mcp_link_speed_params *speed;
	const struct qed_mfw_speed_map *map;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int rc;
	u32 i;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possibly later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		speed->forced_speed = params->forced_speed;

	if (qed_mcp_is_ext_speed_supported(hwfn))
		qed_set_ext_speed_params(link_params, params);

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
			    20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
params.pause.forced_tx) 2121 phylink_set(if_link->supported_caps, Pause); 2122 2123 if_link->sup_fec = link_caps.fec_default; 2124 if_link->active_fec = params.fec; 2125 2126 /* Fill link advertised capability */ 2127 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 2128 if_link->advertised_caps); 2129 2130 /* Fill link supported capability */ 2131 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 2132 if_link->supported_caps); 2133 2134 /* Fill partner advertised capability */ 2135 qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask); 2136 qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps); 2137 2138 if (link.link_up) 2139 if_link->speed = link.speed; 2140 2141 /* TODO - fill duplex properly */ 2142 if_link->duplex = DUPLEX_FULL; 2143 qed_mcp_get_media_type(hwfn, ptt, &media_type); 2144 if_link->port = qed_get_port_type(media_type); 2145 2146 if_link->autoneg = params.speed.autoneg; 2147 2148 if (params.pause.autoneg) 2149 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 2150 if (params.pause.forced_rx) 2151 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 2152 if (params.pause.forced_tx) 2153 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 2154 2155 if (link.an_complete) 2156 phylink_set(if_link->lp_caps, Autoneg); 2157 if (link.partner_adv_pause) 2158 phylink_set(if_link->lp_caps, Pause); 2159 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 2160 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 2161 phylink_set(if_link->lp_caps, Asym_Pause); 2162 2163 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 2164 if_link->eee_supported = false; 2165 } else { 2166 if_link->eee_supported = true; 2167 if_link->eee_active = link.eee_active; 2168 if_link->sup_caps = link_caps.eee_speed_caps; 2169 /* MFW clears adv_caps on eee disable; use configured value */ 2170 if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : 2171 params.eee.adv_caps; 2172 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 2173 if_link->eee.enable = params.eee.enable; 2174 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 2175 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 2176 } 2177 } 2178 2179 static void qed_get_current_link(struct qed_dev *cdev, 2180 struct qed_link_output *if_link) 2181 { 2182 struct qed_hwfn *hwfn; 2183 struct qed_ptt *ptt; 2184 int i; 2185 2186 hwfn = &cdev->hwfns[0]; 2187 if (IS_PF(cdev)) { 2188 ptt = qed_ptt_acquire(hwfn); 2189 if (ptt) { 2190 qed_fill_link(hwfn, ptt, if_link); 2191 qed_ptt_release(hwfn, ptt); 2192 } else { 2193 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 2194 } 2195 } else { 2196 qed_fill_link(hwfn, NULL, if_link); 2197 } 2198 2199 for_each_hwfn(cdev, i) 2200 qed_inform_vf_link_state(&cdev->hwfns[i]); 2201 } 2202 2203 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2204 { 2205 void *cookie = hwfn->cdev->ops_cookie; 2206 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2207 struct qed_link_output if_link; 2208 2209 qed_fill_link(hwfn, ptt, &if_link); 2210 qed_inform_vf_link_state(hwfn); 2211 2212 if (IS_LEAD_HWFN(hwfn) && cookie) 2213 op->link_update(cookie, &if_link); 2214 } 2215 2216 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 2217 { 2218 void *cookie = hwfn->cdev->ops_cookie; 2219 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 2220 2221 if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update) 2222 op->bw_update(cookie); 2223 } 2224 2225 static int qed_drain(struct qed_dev *cdev) 2226 { 2227 struct qed_hwfn *hwfn; 2228 struct qed_ptt *ptt; 2229 int i, rc; 2230 2231 if (IS_VF(cdev)) 2232 return 0; 2233 2234 for_each_hwfn(cdev, i) { 2235 hwfn = &cdev->hwfns[i]; 2236 ptt = qed_ptt_acquire(hwfn); 2237 if (!ptt) { 2238 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 2239 return -EBUSY; 2240 } 2241 rc = qed_mcp_drain(hwfn, ptt); 2242 qed_ptt_release(hwfn, ptt); 2243 if (rc) 2244 return rc; 2245 } 2246 2247 return 0; 2248 } 2249 2250 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 2251 struct qed_nvm_image_att *nvm_image, 2252 u32 *crc) 2253 { 2254 u8 *buf = NULL; 2255 int rc; 2256 2257 /* Allocate a buffer for holding the nvram image */ 2258 buf = kzalloc(nvm_image->length, GFP_KERNEL); 2259 if (!buf) 2260 return -ENOMEM; 2261 2262 /* Read image into buffer */ 2263 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 2264 buf, nvm_image->length); 2265 if (rc) { 2266 DP_ERR(cdev, "Failed reading image from nvm\n"); 2267 goto out; 2268 } 2269 2270 /* Convert the buffer into big-endian format (excluding the 2271 * closing 4 bytes of CRC). 2272 */ 2273 cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf, 2274 DIV_ROUND_UP(nvm_image->length - 4, 4)); 2275 2276 /* Calc CRC for the "actual" image buffer, i.e. not including 2277 * the last 4 CRC bytes. 2278 */ 2279 *crc = ~crc32(~0U, buf, nvm_image->length - 4); 2280 *crc = (__force u32)cpu_to_be32p(crc); 2281 2282 out: 2283 kfree(buf); 2284 2285 return rc; 2286 } 2287 2288 /* Binary file format - 2289 * /----------------------------------------------------------------------\ 2290 * 0B | 0x4 [command index] | 2291 * 4B | image_type | Options | Number of register settings | 2292 * 8B | Value | 2293 * 12B | Mask | 2294 * 16B | Offset | 2295 * \----------------------------------------------------------------------/ 2296 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
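 *
 * Illustrative example (hypothetical values, not taken from a real image):
 * a single set of
 *	Value = 0x00000002, Mask = 0x0000000f, Offset = 0x10
 * reads the 32-bit word at image offset 0x10, replaces its low nibble with
 * 0x2 while leaving the other bits untouched, and writes the result back,
 * i.e. new = (Value & Mask) | (old & ~Mask).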
2297 * Options - 0'b - Calculate & Update CRC for image 2298 */ 2299 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 2300 bool *check_resp) 2301 { 2302 struct qed_nvm_image_att nvm_image; 2303 struct qed_hwfn *p_hwfn; 2304 bool is_crc = false; 2305 u32 image_type; 2306 int rc = 0, i; 2307 u16 len; 2308 2309 *data += 4; 2310 image_type = **data; 2311 p_hwfn = QED_LEADING_HWFN(cdev); 2312 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2313 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 2314 break; 2315 if (i == p_hwfn->nvm_info.num_images) { 2316 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 2317 image_type); 2318 return -ENOENT; 2319 } 2320 2321 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 2322 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 2323 2324 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2325 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 2326 **data, image_type, nvm_image.start_addr, 2327 nvm_image.start_addr + nvm_image.length - 1); 2328 (*data)++; 2329 is_crc = !!(**data & BIT(0)); 2330 (*data)++; 2331 len = *((u16 *)*data); 2332 *data += 2; 2333 if (is_crc) { 2334 u32 crc = 0; 2335 2336 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 2337 if (rc) { 2338 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 2339 goto exit; 2340 } 2341 2342 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2343 (nvm_image.start_addr + 2344 nvm_image.length - 4), (u8 *)&crc, 4); 2345 if (rc) 2346 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 2347 nvm_image.start_addr + nvm_image.length - 4, rc); 2348 goto exit; 2349 } 2350 2351 /* Iterate over the values for setting */ 2352 while (len) { 2353 u32 offset, mask, value, cur_value; 2354 u8 buf[4]; 2355 2356 value = *((u32 *)*data); 2357 *data += 4; 2358 mask = *((u32 *)*data); 2359 *data += 4; 2360 offset = *((u32 *)*data); 2361 *data += 4; 2362 2363 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 2364 4); 2365 if (rc) { 2366 DP_ERR(cdev, "Failed reading from %08x\n", 2367 nvm_image.start_addr + offset); 2368 goto exit; 2369 } 2370 2371 cur_value = le32_to_cpu(*((__le32 *)buf)); 2372 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2373 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 2374 nvm_image.start_addr + offset, cur_value, 2375 (cur_value & ~mask) | (value & mask), value, mask); 2376 value = (value & mask) | (cur_value & ~mask); 2377 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 2378 nvm_image.start_addr + offset, 2379 (u8 *)&value, 4); 2380 if (rc) { 2381 DP_ERR(cdev, "Failed writing to %08x\n", 2382 nvm_image.start_addr + offset); 2383 goto exit; 2384 } 2385 2386 len--; 2387 } 2388 exit: 2389 return rc; 2390 } 2391 2392 /* Binary file format - 2393 * /----------------------------------------------------------------------\ 2394 * 0B | 0x3 [command index] | 2395 * 4B | b'0: check_response? 
| b'1-31 reserved | 2396 * 8B | File-type | reserved | 2397 * 12B | Image length in bytes | 2398 * \----------------------------------------------------------------------/ 2399 * Start a new file of the provided type 2400 */ 2401 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 2402 const u8 **data, bool *check_resp) 2403 { 2404 u32 file_type, file_size = 0; 2405 int rc; 2406 2407 *data += 4; 2408 *check_resp = !!(**data & BIT(0)); 2409 *data += 4; 2410 file_type = **data; 2411 2412 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2413 "About to start a new file of type %02x\n", file_type); 2414 if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { 2415 *data += 4; 2416 file_size = *((u32 *)(*data)); 2417 } 2418 2419 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, 2420 (u8 *)(&file_size), 4); 2421 *data += 4; 2422 2423 return rc; 2424 } 2425 2426 /* Binary file format - 2427 * /----------------------------------------------------------------------\ 2428 * 0B | 0x2 [command index] | 2429 * 4B | Length in bytes | 2430 * 8B | b'0: check_response? | b'1-31 reserved | 2431 * 12B | Offset in bytes | 2432 * 16B | Data ... | 2433 * \----------------------------------------------------------------------/ 2434 * Write data as part of a file that was previously started. Data should be 2435 * of length equal to that provided in the message 2436 */ 2437 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 2438 const u8 **data, bool *check_resp) 2439 { 2440 u32 offset, len; 2441 int rc; 2442 2443 *data += 4; 2444 len = *((u32 *)(*data)); 2445 *data += 4; 2446 *check_resp = !!(**data & BIT(0)); 2447 *data += 4; 2448 offset = *((u32 *)(*data)); 2449 *data += 4; 2450 2451 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2452 "About to write File-data: %08x bytes to offset %08x\n", 2453 len, offset); 2454 2455 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 2456 (char *)(*data), len); 2457 *data += len; 2458 2459 return rc; 2460 } 2461 2462 /* Binary file format [General header] - 2463 * /----------------------------------------------------------------------\ 2464 * 0B | QED_NVM_SIGNATURE | 2465 * 4B | Length in bytes | 2466 * 8B | Highest command in this batchfile | Reserved | 2467 * \----------------------------------------------------------------------/ 2468 */ 2469 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 2470 const struct firmware *image, 2471 const u8 **data) 2472 { 2473 u32 signature, len; 2474 2475 /* Check minimum size */ 2476 if (image->size < 12) { 2477 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 2478 return -EINVAL; 2479 } 2480 2481 /* Check signature */ 2482 signature = *((u32 *)(*data)); 2483 if (signature != QED_NVM_SIGNATURE) { 2484 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 2485 return -EINVAL; 2486 } 2487 2488 *data += 4; 2489 /* Validate internal size equals the image-size */ 2490 len = *((u32 *)(*data)); 2491 if (len != image->size) { 2492 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 2493 len, (u32)image->size); 2494 return -EINVAL; 2495 } 2496 2497 *data += 4; 2498 /* Make sure driver familiar with all commands necessary for this */ 2499 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 2500 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 2501 *((u16 *)(*data))); 2502 return -EINVAL; 2503 } 2504 2505 *data += 4; 2506 2507 return 0; 2508 } 2509 2510 /* Binary file format - 2511 * /----------------------------------------------------------------------\ 2512 * 0B | 0x5 [command index] | 
 * 4B | Number of config attributes     |          Reserved                  |
 * 4B | Config ID                       | Entity ID      | Length            |
 * 4B | Value                                                                |
 *    |                                                                      |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}

static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ?
QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS; 2630 rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len); 2631 if (rc) 2632 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd); 2633 2634 qed_ptt_release(hwfn, ptt); 2635 2636 return rc; 2637 } 2638 2639 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 2640 { 2641 const struct firmware *image; 2642 const u8 *data, *data_end; 2643 u32 cmd_type; 2644 int rc; 2645 2646 rc = request_firmware(&image, name, &cdev->pdev->dev); 2647 if (rc) { 2648 DP_ERR(cdev, "Failed to find '%s'\n", name); 2649 return rc; 2650 } 2651 2652 DP_VERBOSE(cdev, NETIF_MSG_DRV, 2653 "Flashing '%s' - firmware's data at %p, size is %08x\n", 2654 name, image->data, (u32)image->size); 2655 data = image->data; 2656 data_end = data + image->size; 2657 2658 rc = qed_nvm_flash_image_validate(cdev, image, &data); 2659 if (rc) 2660 goto exit; 2661 2662 while (data < data_end) { 2663 bool check_resp = false; 2664 2665 /* Parse the actual command */ 2666 cmd_type = *((u32 *)data); 2667 switch (cmd_type) { 2668 case QED_NVM_FLASH_CMD_FILE_DATA: 2669 rc = qed_nvm_flash_image_file_data(cdev, &data, 2670 &check_resp); 2671 break; 2672 case QED_NVM_FLASH_CMD_FILE_START: 2673 rc = qed_nvm_flash_image_file_start(cdev, &data, 2674 &check_resp); 2675 break; 2676 case QED_NVM_FLASH_CMD_NVM_CHANGE: 2677 rc = qed_nvm_flash_image_access(cdev, &data, 2678 &check_resp); 2679 break; 2680 case QED_NVM_FLASH_CMD_NVM_CFG_ID: 2681 rc = qed_nvm_flash_cfg_write(cdev, &data); 2682 break; 2683 default: 2684 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 2685 rc = -EINVAL; 2686 goto exit; 2687 } 2688 2689 if (rc) { 2690 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 2691 goto exit; 2692 } 2693 2694 /* Check response if needed */ 2695 if (check_resp) { 2696 u32 mcp_response = 0; 2697 2698 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 2699 DP_ERR(cdev, "Failed getting MCP response\n"); 2700 rc = -EINVAL; 2701 goto exit; 2702 } 2703 2704 switch (mcp_response & FW_MSG_CODE_MASK) { 2705 case FW_MSG_CODE_OK: 2706 case FW_MSG_CODE_NVM_OK: 2707 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 2708 case FW_MSG_CODE_PHY_OK: 2709 break; 2710 default: 2711 DP_ERR(cdev, "MFW returns error: %08x\n", 2712 mcp_response); 2713 rc = -EINVAL; 2714 goto exit; 2715 } 2716 } 2717 } 2718 2719 exit: 2720 release_firmware(image); 2721 2722 return rc; 2723 } 2724 2725 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 2726 u8 *buf, u16 len) 2727 { 2728 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2729 2730 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 2731 } 2732 2733 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn) 2734 { 2735 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2736 void *cookie = p_hwfn->cdev->ops_cookie; 2737 2738 if (ops && ops->schedule_recovery_handler) 2739 ops->schedule_recovery_handler(cookie); 2740 } 2741 2742 static const char * const qed_hw_err_type_descr[] = { 2743 [QED_HW_ERR_FAN_FAIL] = "Fan Failure", 2744 [QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure", 2745 [QED_HW_ERR_HW_ATTN] = "HW Attention", 2746 [QED_HW_ERR_DMAE_FAIL] = "DMAE Failure", 2747 [QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure", 2748 [QED_HW_ERR_FW_ASSERT] = "FW Assertion", 2749 [QED_HW_ERR_LAST] = "Unknown", 2750 }; 2751 2752 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn, 2753 enum qed_hw_err_type err_type) 2754 { 2755 struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common; 2756 void *cookie = p_hwfn->cdev->ops_cookie; 2757 
const char *err_str; 2758 2759 if (err_type > QED_HW_ERR_LAST) 2760 err_type = QED_HW_ERR_LAST; 2761 err_str = qed_hw_err_type_descr[err_type]; 2762 2763 DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str); 2764 2765 /* Call the HW error handler of the protocol driver. 2766 * If it is not available - perform a minimal handling of preventing 2767 * HW attentions from being reasserted. 2768 */ 2769 if (ops && ops->schedule_hw_err_handler) 2770 ops->schedule_hw_err_handler(cookie, err_type); 2771 else 2772 qed_int_attn_clr_enable(p_hwfn->cdev, true); 2773 } 2774 2775 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2776 void *handle) 2777 { 2778 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2779 } 2780 2781 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2782 { 2783 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2784 struct qed_ptt *ptt; 2785 int status = 0; 2786 2787 ptt = qed_ptt_acquire(hwfn); 2788 if (!ptt) 2789 return -EAGAIN; 2790 2791 status = qed_mcp_set_led(hwfn, ptt, mode); 2792 2793 qed_ptt_release(hwfn, ptt); 2794 2795 return status; 2796 } 2797 2798 int qed_recovery_process(struct qed_dev *cdev) 2799 { 2800 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 2801 struct qed_ptt *p_ptt; 2802 int rc = 0; 2803 2804 p_ptt = qed_ptt_acquire(p_hwfn); 2805 if (!p_ptt) 2806 return -EAGAIN; 2807 2808 rc = qed_start_recovery_process(p_hwfn, p_ptt); 2809 2810 qed_ptt_release(p_hwfn, p_ptt); 2811 2812 return rc; 2813 } 2814 2815 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2816 { 2817 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2818 struct qed_ptt *ptt; 2819 int rc = 0; 2820 2821 if (IS_VF(cdev)) 2822 return 0; 2823 2824 ptt = qed_ptt_acquire(hwfn); 2825 if (!ptt) 2826 return -EAGAIN; 2827 2828 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2829 : QED_OV_WOL_DISABLED); 2830 if (rc) 2831 goto out; 2832 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2833 2834 out: 2835 qed_ptt_release(hwfn, ptt); 2836 return rc; 2837 } 2838 2839 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2840 { 2841 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2842 struct qed_ptt *ptt; 2843 int status = 0; 2844 2845 if (IS_VF(cdev)) 2846 return 0; 2847 2848 ptt = qed_ptt_acquire(hwfn); 2849 if (!ptt) 2850 return -EAGAIN; 2851 2852 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
2853 QED_OV_DRIVER_STATE_ACTIVE : 2854 QED_OV_DRIVER_STATE_DISABLED); 2855 2856 qed_ptt_release(hwfn, ptt); 2857 2858 return status; 2859 } 2860 2861 static int qed_update_mac(struct qed_dev *cdev, const u8 *mac) 2862 { 2863 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2864 struct qed_ptt *ptt; 2865 int status = 0; 2866 2867 if (IS_VF(cdev)) 2868 return 0; 2869 2870 ptt = qed_ptt_acquire(hwfn); 2871 if (!ptt) 2872 return -EAGAIN; 2873 2874 status = qed_mcp_ov_update_mac(hwfn, ptt, mac); 2875 if (status) 2876 goto out; 2877 2878 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2879 2880 out: 2881 qed_ptt_release(hwfn, ptt); 2882 return status; 2883 } 2884 2885 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu) 2886 { 2887 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2888 struct qed_ptt *ptt; 2889 int status = 0; 2890 2891 if (IS_VF(cdev)) 2892 return 0; 2893 2894 ptt = qed_ptt_acquire(hwfn); 2895 if (!ptt) 2896 return -EAGAIN; 2897 2898 status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu); 2899 if (status) 2900 goto out; 2901 2902 status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2903 2904 out: 2905 qed_ptt_release(hwfn, ptt); 2906 return status; 2907 } 2908 2909 static int 2910 qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb, 2911 u16 qid, struct qed_sb_info_dbg *sb_dbg) 2912 { 2913 struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns]; 2914 struct qed_ptt *ptt; 2915 int rc; 2916 2917 if (IS_VF(cdev)) 2918 return -EINVAL; 2919 2920 ptt = qed_ptt_acquire(hwfn); 2921 if (!ptt) { 2922 DP_NOTICE(hwfn, "Can't acquire PTT\n"); 2923 return -EAGAIN; 2924 } 2925 2926 memset(sb_dbg, 0, sizeof(*sb_dbg)); 2927 rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg); 2928 2929 qed_ptt_release(hwfn, ptt); 2930 return rc; 2931 } 2932 2933 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf, 2934 u8 dev_addr, u32 offset, u32 len) 2935 { 2936 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2937 struct qed_ptt *ptt; 2938 int rc = 0; 2939 2940 if (IS_VF(cdev)) 2941 return 0; 2942 2943 ptt = qed_ptt_acquire(hwfn); 2944 if (!ptt) 2945 return -EAGAIN; 2946 2947 rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr, 2948 offset, len, buf); 2949 2950 qed_ptt_release(hwfn, ptt); 2951 2952 return rc; 2953 } 2954 2955 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val) 2956 { 2957 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2958 struct qed_ptt *ptt; 2959 int rc = 0; 2960 2961 if (IS_VF(cdev)) 2962 return 0; 2963 2964 ptt = qed_ptt_acquire(hwfn); 2965 if (!ptt) 2966 return -EAGAIN; 2967 2968 rc = qed_dbg_grc_config(hwfn, cfg_id, val); 2969 2970 qed_ptt_release(hwfn, ptt); 2971 2972 return rc; 2973 } 2974 2975 static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...) 
2976 { 2977 char buf[QED_MFW_REPORT_STR_SIZE]; 2978 struct qed_hwfn *p_hwfn; 2979 struct qed_ptt *p_ptt; 2980 va_list vl; 2981 2982 va_start(vl, fmt); 2983 vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl); 2984 va_end(vl); 2985 2986 if (IS_PF(cdev)) { 2987 p_hwfn = QED_LEADING_HWFN(cdev); 2988 p_ptt = qed_ptt_acquire(p_hwfn); 2989 if (p_ptt) { 2990 qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf, strlen(buf)); 2991 qed_ptt_release(p_hwfn, p_ptt); 2992 } 2993 } 2994 } 2995 2996 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev) 2997 { 2998 return QED_AFFIN_HWFN_IDX(cdev); 2999 } 3000 3001 static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active) 3002 { 3003 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 3004 struct qed_ptt *ptt; 3005 int rc = 0; 3006 3007 *esl_active = false; 3008 3009 if (IS_VF(cdev)) 3010 return 0; 3011 3012 ptt = qed_ptt_acquire(hwfn); 3013 if (!ptt) 3014 return -EAGAIN; 3015 3016 rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active); 3017 3018 qed_ptt_release(hwfn, ptt); 3019 3020 return rc; 3021 } 3022 3023 static struct qed_selftest_ops qed_selftest_ops_pass = { 3024 .selftest_memory = &qed_selftest_memory, 3025 .selftest_interrupt = &qed_selftest_interrupt, 3026 .selftest_register = &qed_selftest_register, 3027 .selftest_clock = &qed_selftest_clock, 3028 .selftest_nvram = &qed_selftest_nvram, 3029 }; 3030 3031 const struct qed_common_ops qed_common_ops_pass = { 3032 .selftest = &qed_selftest_ops_pass, 3033 .probe = &qed_probe, 3034 .remove = &qed_remove, 3035 .set_power_state = &qed_set_power_state, 3036 .set_name = &qed_set_name, 3037 .update_pf_params = &qed_update_pf_params, 3038 .slowpath_start = &qed_slowpath_start, 3039 .slowpath_stop = &qed_slowpath_stop, 3040 .set_fp_int = &qed_set_int_fp, 3041 .get_fp_int = &qed_get_int_fp, 3042 .sb_init = &qed_sb_init, 3043 .sb_release = &qed_sb_release, 3044 .simd_handler_config = &qed_simd_handler_config, 3045 .simd_handler_clean = &qed_simd_handler_clean, 3046 .dbg_grc = &qed_dbg_grc, 3047 .dbg_grc_size = &qed_dbg_grc_size, 3048 .can_link_change = &qed_can_link_change, 3049 .set_link = &qed_set_link, 3050 .get_link = &qed_get_current_link, 3051 .drain = &qed_drain, 3052 .update_msglvl = &qed_init_dp, 3053 .devlink_register = qed_devlink_register, 3054 .devlink_unregister = qed_devlink_unregister, 3055 .report_fatal_error = qed_report_fatal_error, 3056 .dbg_all_data = &qed_dbg_all_data, 3057 .dbg_all_data_size = &qed_dbg_all_data_size, 3058 .chain_alloc = &qed_chain_alloc, 3059 .chain_free = &qed_chain_free, 3060 .nvm_flash = &qed_nvm_flash, 3061 .nvm_get_image = &qed_nvm_get_image, 3062 .set_coalesce = &qed_set_coalesce, 3063 .set_led = &qed_set_led, 3064 .recovery_process = &qed_recovery_process, 3065 .recovery_prolog = &qed_recovery_prolog, 3066 .attn_clr_enable = &qed_int_attn_clr_enable, 3067 .update_drv_state = &qed_update_drv_state, 3068 .update_mac = &qed_update_mac, 3069 .update_mtu = &qed_update_mtu, 3070 .update_wol = &qed_update_wol, 3071 .db_recovery_add = &qed_db_recovery_add, 3072 .db_recovery_del = &qed_db_recovery_del, 3073 .read_module_eeprom = &qed_read_module_eeprom, 3074 .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx, 3075 .read_nvm_cfg = &qed_nvm_flash_cfg_read, 3076 .read_nvm_cfg_len = &qed_nvm_flash_cfg_len, 3077 .set_grc_config = &qed_set_grc_config, 3078 .mfw_report = &qed_mfw_report, 3079 .get_sb_info = &qed_get_sb_info, 3080 .get_esl_status = &qed_get_esl_status, 3081 }; 3082 3083 void qed_get_protocol_stats(struct qed_dev *cdev, 3084 enum qed_mcp_protocol_type type, 3085 union 
			    qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats_context(cdev, &eth_stats, true);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}
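
/* The MFW TLV request flow driven by the helpers above is roughly:
 * qed_mfw_tlv_req() marks QED_SLOWPATH_MFW_TLV_REQ and queues the slowpath
 * task, and the TLV processing eventually calls qed_mfw_fill_tlv_data(),
 * which builds the generic TLVs from the protocol driver's
 * ->get_generic_tlv_data() callback plus the vport statistics.
 *
 * A minimal sketch of such a callback (hypothetical names; an actual
 * protocol driver such as qede fills this from its own device state):
 *
 *	static void my_get_generic_tlv_data(void *dev,
 *					    struct qed_generic_tlvs *data)
 *	{
 *		struct my_dev *edev = dev;
 *
 *		data->feat_flags |= QED_TLV_IP_CSUM | QED_TLV_LSO;
 *		ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
 *	}
 */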

unsigned long qed_get_epoch_time(void)
{
	return ktime_get_real_seconds();
}
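
/* Usage note (illustrative, not part of this file): protocol drivers reach
 * the qed_common_ops_pass table above through their qed ops handle, so an
 * ethtool-initiated flash update typically funnels into qed_nvm_flash()
 * along the lines of:
 *
 *	// hypothetical caller-side sketch (qede-style naming)
 *	rc = edev->ops->common->nvm_flash(edev->cdev, fw_file_name);
 *
 * qed_nvm_flash() then validates the image header and dispatches the
 * per-command handlers defined earlier in this file.
 */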