1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/stddef.h> 34 #include <linux/pci.h> 35 #include <linux/kernel.h> 36 #include <linux/slab.h> 37 #include <linux/delay.h> 38 #include <asm/byteorder.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/string.h> 41 #include <linux/module.h> 42 #include <linux/interrupt.h> 43 #include <linux/workqueue.h> 44 #include <linux/ethtool.h> 45 #include <linux/etherdevice.h> 46 #include <linux/vmalloc.h> 47 #include <linux/crash_dump.h> 48 #include <linux/crc32.h> 49 #include <linux/qed/qed_if.h> 50 #include <linux/qed/qed_ll2_if.h> 51 #include <net/devlink.h> 52 53 #include "qed.h" 54 #include "qed_sriov.h" 55 #include "qed_sp.h" 56 #include "qed_dev_api.h" 57 #include "qed_ll2.h" 58 #include "qed_fcoe.h" 59 #include "qed_iscsi.h" 60 61 #include "qed_mcp.h" 62 #include "qed_reg_addr.h" 63 #include "qed_hw.h" 64 #include "qed_selftest.h" 65 #include "qed_debug.h" 66 67 #define QED_ROCE_QPS (8192) 68 #define QED_ROCE_DPIS (8) 69 #define QED_RDMA_SRQS QED_ROCE_QPS 70 71 static char version[] = 72 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 73 74 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 75 MODULE_LICENSE("GPL"); 76 MODULE_VERSION(DRV_MODULE_VERSION); 77 78 #define FW_FILE_VERSION \ 79 __stringify(FW_MAJOR_VERSION) "." \ 80 __stringify(FW_MINOR_VERSION) "." \ 81 __stringify(FW_REVISION_VERSION) "." \ 82 __stringify(FW_ENGINEERING_VERSION) 83 84 #define QED_FW_FILE_NAME \ 85 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 86 87 MODULE_FIRMWARE(QED_FW_FILE_NAME); 88 89 static int __init qed_init(void) 90 { 91 pr_info("%s", version); 92 93 return 0; 94 } 95 96 static void __exit qed_cleanup(void) 97 { 98 pr_notice("qed_cleanup called\n"); 99 } 100 101 module_init(qed_init); 102 module_exit(qed_cleanup); 103 104 /* Check if the DMA controller on the machine can properly handle the DMA 105 * addressing required by the device. 
106 */ 107 static int qed_set_coherency_mask(struct qed_dev *cdev) 108 { 109 struct device *dev = &cdev->pdev->dev; 110 111 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 112 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 113 DP_NOTICE(cdev, 114 "Can't request 64-bit consistent allocations\n"); 115 return -EIO; 116 } 117 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 118 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 119 return -EIO; 120 } 121 122 return 0; 123 } 124 125 static void qed_free_pci(struct qed_dev *cdev) 126 { 127 struct pci_dev *pdev = cdev->pdev; 128 129 if (cdev->doorbells && cdev->db_size) 130 iounmap(cdev->doorbells); 131 if (cdev->regview) 132 iounmap(cdev->regview); 133 if (atomic_read(&pdev->enable_cnt) == 1) 134 pci_release_regions(pdev); 135 136 pci_disable_device(pdev); 137 } 138 139 #define PCI_REVISION_ID_ERROR_VAL 0xff 140 141 /* Performs PCI initializations as well as initializing PCI-related parameters 142 * in the device structrue. Returns 0 in case of success. 143 */ 144 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 145 { 146 u8 rev_id; 147 int rc; 148 149 cdev->pdev = pdev; 150 151 rc = pci_enable_device(pdev); 152 if (rc) { 153 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 154 goto err0; 155 } 156 157 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 158 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 159 rc = -EIO; 160 goto err1; 161 } 162 163 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 164 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 165 rc = -EIO; 166 goto err1; 167 } 168 169 if (atomic_read(&pdev->enable_cnt) == 1) { 170 rc = pci_request_regions(pdev, "qed"); 171 if (rc) { 172 DP_NOTICE(cdev, 173 "Failed to request PCI memory resources\n"); 174 goto err1; 175 } 176 pci_set_master(pdev); 177 pci_save_state(pdev); 178 } 179 180 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 181 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 182 DP_NOTICE(cdev, 183 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", 184 rev_id); 185 rc = -ENODEV; 186 goto err2; 187 } 188 if (!pci_is_pcie(pdev)) { 189 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 190 rc = -EIO; 191 goto err2; 192 } 193 194 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 195 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 196 DP_NOTICE(cdev, "Cannot find power management capability\n"); 197 198 rc = qed_set_coherency_mask(cdev); 199 if (rc) 200 goto err2; 201 202 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 203 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 204 cdev->pci_params.irq = pdev->irq; 205 206 cdev->regview = pci_ioremap_bar(pdev, 0); 207 if (!cdev->regview) { 208 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 209 rc = -ENOMEM; 210 goto err2; 211 } 212 213 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 214 cdev->db_size = pci_resource_len(cdev->pdev, 2); 215 if (!cdev->db_size) { 216 if (IS_PF(cdev)) { 217 DP_NOTICE(cdev, "No Doorbell bar available\n"); 218 return -EINVAL; 219 } else { 220 return 0; 221 } 222 } 223 224 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 225 226 if (!cdev->doorbells) { 227 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 228 return -ENOMEM; 229 } 230 231 return 0; 232 233 err2: 234 pci_release_regions(pdev); 235 err1: 236 pci_disable_device(pdev); 237 err0: 238 return rc; 239 } 240 241 int qed_fill_dev_info(struct qed_dev *cdev, 242 struct qed_dev_info *dev_info) 243 { 244 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 245 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 246 struct qed_tunnel_info *tun = &cdev->tunnel; 247 struct qed_ptt *ptt; 248 249 memset(dev_info, 0, sizeof(struct qed_dev_info)); 250 251 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 252 tun->vxlan.b_mode_enabled) 253 dev_info->vxlan_enable = true; 254 255 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 256 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 257 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 258 dev_info->gre_enable = true; 259 260 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 261 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 262 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 263 dev_info->geneve_enable = true; 264 265 dev_info->num_hwfns = cdev->num_hwfns; 266 dev_info->pci_mem_start = cdev->pci_params.mem_start; 267 dev_info->pci_mem_end = cdev->pci_params.mem_end; 268 dev_info->pci_irq = cdev->pci_params.irq; 269 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 270 dev_info->dev_type = cdev->type; 271 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 272 273 if (IS_PF(cdev)) { 274 dev_info->fw_major = FW_MAJOR_VERSION; 275 dev_info->fw_minor = FW_MINOR_VERSION; 276 dev_info->fw_rev = FW_REVISION_VERSION; 277 dev_info->fw_eng = FW_ENGINEERING_VERSION; 278 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 279 &cdev->mf_bits); 280 dev_info->tx_switching = true; 281 282 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 283 dev_info->wol_support = true; 284 285 dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn); 286 287 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 288 } else { 289 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 290 &dev_info->fw_minor, &dev_info->fw_rev, 291 &dev_info->fw_eng); 292 } 293 294 if (IS_PF(cdev)) { 295 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 296 if (ptt) { 297 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 298 &dev_info->mfw_rev, NULL); 299 300 
qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 301 &dev_info->mbi_version); 302 303 qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 304 &dev_info->flash_size); 305 306 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 307 } 308 } else { 309 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 310 &dev_info->mfw_rev, NULL); 311 } 312 313 dev_info->mtu = hw_info->mtu; 314 315 return 0; 316 } 317 318 static void qed_free_cdev(struct qed_dev *cdev) 319 { 320 kfree((void *)cdev); 321 } 322 323 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 324 { 325 struct qed_dev *cdev; 326 327 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 328 if (!cdev) 329 return cdev; 330 331 qed_init_struct(cdev); 332 333 return cdev; 334 } 335 336 /* Sets the requested power state */ 337 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 338 { 339 if (!cdev) 340 return -ENODEV; 341 342 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 343 return 0; 344 } 345 346 struct qed_devlink { 347 struct qed_dev *cdev; 348 }; 349 350 enum qed_devlink_param_id { 351 QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, 352 QED_DEVLINK_PARAM_ID_IWARP_CMT, 353 }; 354 355 static int qed_dl_param_get(struct devlink *dl, u32 id, 356 struct devlink_param_gset_ctx *ctx) 357 { 358 struct qed_devlink *qed_dl; 359 struct qed_dev *cdev; 360 361 qed_dl = devlink_priv(dl); 362 cdev = qed_dl->cdev; 363 ctx->val.vbool = cdev->iwarp_cmt; 364 365 return 0; 366 } 367 368 static int qed_dl_param_set(struct devlink *dl, u32 id, 369 struct devlink_param_gset_ctx *ctx) 370 { 371 struct qed_devlink *qed_dl; 372 struct qed_dev *cdev; 373 374 qed_dl = devlink_priv(dl); 375 cdev = qed_dl->cdev; 376 cdev->iwarp_cmt = ctx->val.vbool; 377 378 return 0; 379 } 380 381 static const struct devlink_param qed_devlink_params[] = { 382 DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT, 383 "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL, 384 BIT(DEVLINK_PARAM_CMODE_RUNTIME), 385 qed_dl_param_get, qed_dl_param_set, NULL), 386 }; 387 388 static const struct devlink_ops qed_dl_ops; 389 390 static int qed_devlink_register(struct qed_dev *cdev) 391 { 392 union devlink_param_value value; 393 struct qed_devlink *qed_dl; 394 struct devlink *dl; 395 int rc; 396 397 dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl)); 398 if (!dl) 399 return -ENOMEM; 400 401 qed_dl = devlink_priv(dl); 402 403 cdev->dl = dl; 404 qed_dl->cdev = cdev; 405 406 rc = devlink_register(dl, &cdev->pdev->dev); 407 if (rc) 408 goto err_free; 409 410 rc = devlink_params_register(dl, qed_devlink_params, 411 ARRAY_SIZE(qed_devlink_params)); 412 if (rc) 413 goto err_unregister; 414 415 value.vbool = false; 416 devlink_param_driverinit_value_set(dl, 417 QED_DEVLINK_PARAM_ID_IWARP_CMT, 418 value); 419 420 devlink_params_publish(dl); 421 cdev->iwarp_cmt = false; 422 423 return 0; 424 425 err_unregister: 426 devlink_unregister(dl); 427 428 err_free: 429 cdev->dl = NULL; 430 devlink_free(dl); 431 432 return rc; 433 } 434 435 static void qed_devlink_unregister(struct qed_dev *cdev) 436 { 437 if (!cdev->dl) 438 return; 439 440 devlink_params_unregister(cdev->dl, qed_devlink_params, 441 ARRAY_SIZE(qed_devlink_params)); 442 443 devlink_unregister(cdev->dl); 444 devlink_free(cdev->dl); 445 } 446 447 /* probing */ 448 static struct qed_dev *qed_probe(struct pci_dev *pdev, 449 struct qed_probe_params *params) 450 { 451 struct qed_dev *cdev; 452 int rc; 453 454 cdev = qed_alloc_cdev(pdev); 455 if (!cdev) 456 goto err0; 457 458 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 459 cdev->protocol = 
params->protocol; 460 461 if (params->is_vf) 462 cdev->b_is_vf = true; 463 464 qed_init_dp(cdev, params->dp_module, params->dp_level); 465 466 cdev->recov_in_prog = params->recov_in_prog; 467 468 rc = qed_init_pci(cdev, pdev); 469 if (rc) { 470 DP_ERR(cdev, "init pci failed\n"); 471 goto err1; 472 } 473 DP_INFO(cdev, "PCI init completed successfully\n"); 474 475 rc = qed_devlink_register(cdev); 476 if (rc) { 477 DP_INFO(cdev, "Failed to register devlink.\n"); 478 goto err2; 479 } 480 481 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 482 if (rc) { 483 DP_ERR(cdev, "hw prepare failed\n"); 484 goto err2; 485 } 486 487 DP_INFO(cdev, "qed_probe completed successfully\n"); 488 489 return cdev; 490 491 err2: 492 qed_free_pci(cdev); 493 err1: 494 qed_free_cdev(cdev); 495 err0: 496 return NULL; 497 } 498 499 static void qed_remove(struct qed_dev *cdev) 500 { 501 if (!cdev) 502 return; 503 504 qed_hw_remove(cdev); 505 506 qed_free_pci(cdev); 507 508 qed_set_power_state(cdev, PCI_D3hot); 509 510 qed_devlink_unregister(cdev); 511 512 qed_free_cdev(cdev); 513 } 514 515 static void qed_disable_msix(struct qed_dev *cdev) 516 { 517 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 518 pci_disable_msix(cdev->pdev); 519 kfree(cdev->int_params.msix_table); 520 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 521 pci_disable_msi(cdev->pdev); 522 } 523 524 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 525 } 526 527 static int qed_enable_msix(struct qed_dev *cdev, 528 struct qed_int_params *int_params) 529 { 530 int i, rc, cnt; 531 532 cnt = int_params->in.num_vectors; 533 534 for (i = 0; i < cnt; i++) 535 int_params->msix_table[i].entry = i; 536 537 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 538 int_params->in.min_msix_cnt, cnt); 539 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 540 (rc % cdev->num_hwfns)) { 541 pci_disable_msix(cdev->pdev); 542 543 /* If fastpath is initialized, we need at least one interrupt 544 * per hwfn [and the slow path interrupts]. New requested number 545 * should be a multiple of the number of hwfns. 
546 */ 547 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 548 DP_NOTICE(cdev, 549 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 550 cnt, int_params->in.num_vectors); 551 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 552 cnt); 553 if (!rc) 554 rc = cnt; 555 } 556 557 if (rc > 0) { 558 /* MSI-x configuration was achieved */ 559 int_params->out.int_mode = QED_INT_MODE_MSIX; 560 int_params->out.num_vectors = rc; 561 rc = 0; 562 } else { 563 DP_NOTICE(cdev, 564 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 565 cnt, rc); 566 } 567 568 return rc; 569 } 570 571 /* This function outputs the int mode and the number of enabled msix vector */ 572 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 573 { 574 struct qed_int_params *int_params = &cdev->int_params; 575 struct msix_entry *tbl; 576 int rc = 0, cnt; 577 578 switch (int_params->in.int_mode) { 579 case QED_INT_MODE_MSIX: 580 /* Allocate MSIX table */ 581 cnt = int_params->in.num_vectors; 582 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 583 if (!int_params->msix_table) { 584 rc = -ENOMEM; 585 goto out; 586 } 587 588 /* Enable MSIX */ 589 rc = qed_enable_msix(cdev, int_params); 590 if (!rc) 591 goto out; 592 593 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 594 kfree(int_params->msix_table); 595 if (force_mode) 596 goto out; 597 /* Fallthrough */ 598 599 case QED_INT_MODE_MSI: 600 if (cdev->num_hwfns == 1) { 601 rc = pci_enable_msi(cdev->pdev); 602 if (!rc) { 603 int_params->out.int_mode = QED_INT_MODE_MSI; 604 goto out; 605 } 606 607 DP_NOTICE(cdev, "Failed to enable MSI\n"); 608 if (force_mode) 609 goto out; 610 } 611 /* Fallthrough */ 612 613 case QED_INT_MODE_INTA: 614 int_params->out.int_mode = QED_INT_MODE_INTA; 615 rc = 0; 616 goto out; 617 default: 618 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 619 int_params->in.int_mode); 620 rc = -EINVAL; 621 } 622 623 out: 624 if (!rc) 625 DP_INFO(cdev, "Using %s interrupts\n", 626 int_params->out.int_mode == QED_INT_MODE_INTA ? 627 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
628 "MSI" : "MSIX"); 629 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 630 631 return rc; 632 } 633 634 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 635 int index, void(*handler)(void *)) 636 { 637 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 638 int relative_idx = index / cdev->num_hwfns; 639 640 hwfn->simd_proto_handler[relative_idx].func = handler; 641 hwfn->simd_proto_handler[relative_idx].token = token; 642 } 643 644 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 645 { 646 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 647 int relative_idx = index / cdev->num_hwfns; 648 649 memset(&hwfn->simd_proto_handler[relative_idx], 0, 650 sizeof(struct qed_simd_fp_handler)); 651 } 652 653 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 654 { 655 tasklet_schedule((struct tasklet_struct *)tasklet); 656 return IRQ_HANDLED; 657 } 658 659 static irqreturn_t qed_single_int(int irq, void *dev_instance) 660 { 661 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 662 struct qed_hwfn *hwfn; 663 irqreturn_t rc = IRQ_NONE; 664 u64 status; 665 int i, j; 666 667 for (i = 0; i < cdev->num_hwfns; i++) { 668 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 669 670 if (!status) 671 continue; 672 673 hwfn = &cdev->hwfns[i]; 674 675 /* Slowpath interrupt */ 676 if (unlikely(status & 0x1)) { 677 tasklet_schedule(hwfn->sp_dpc); 678 status &= ~0x1; 679 rc = IRQ_HANDLED; 680 } 681 682 /* Fastpath interrupts */ 683 for (j = 0; j < 64; j++) { 684 if ((0x2ULL << j) & status) { 685 struct qed_simd_fp_handler *p_handler = 686 &hwfn->simd_proto_handler[j]; 687 688 if (p_handler->func) 689 p_handler->func(p_handler->token); 690 else 691 DP_NOTICE(hwfn, 692 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 693 j, status); 694 695 status &= ~(0x2ULL << j); 696 rc = IRQ_HANDLED; 697 } 698 } 699 700 if (unlikely(status)) 701 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 702 "got an unknown interrupt status 0x%llx\n", 703 status); 704 } 705 706 return rc; 707 } 708 709 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 710 { 711 struct qed_dev *cdev = hwfn->cdev; 712 u32 int_mode; 713 int rc = 0; 714 u8 id; 715 716 int_mode = cdev->int_params.out.int_mode; 717 if (int_mode == QED_INT_MODE_MSIX) { 718 id = hwfn->my_id; 719 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 720 id, cdev->pdev->bus->number, 721 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 722 rc = request_irq(cdev->int_params.msix_table[id].vector, 723 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 724 } else { 725 unsigned long flags = 0; 726 727 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 728 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 729 PCI_FUNC(cdev->pdev->devfn)); 730 731 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 732 flags |= IRQF_SHARED; 733 734 rc = request_irq(cdev->pdev->irq, qed_single_int, 735 flags, cdev->name, cdev); 736 } 737 738 if (rc) 739 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 740 else 741 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 742 "Requested slowpath %s\n", 743 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 744 745 return rc; 746 } 747 748 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 749 { 750 /* Calling the disable function will make sure that any 751 * currently-running function is completed. The following call to the 752 * enable function makes this sequence a flush-like operation. 
753 */ 754 if (p_hwfn->b_sp_dpc_enabled) { 755 tasklet_disable(p_hwfn->sp_dpc); 756 tasklet_enable(p_hwfn->sp_dpc); 757 } 758 } 759 760 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 761 { 762 struct qed_dev *cdev = p_hwfn->cdev; 763 u8 id = p_hwfn->my_id; 764 u32 int_mode; 765 766 int_mode = cdev->int_params.out.int_mode; 767 if (int_mode == QED_INT_MODE_MSIX) 768 synchronize_irq(cdev->int_params.msix_table[id].vector); 769 else 770 synchronize_irq(cdev->pdev->irq); 771 772 qed_slowpath_tasklet_flush(p_hwfn); 773 } 774 775 static void qed_slowpath_irq_free(struct qed_dev *cdev) 776 { 777 int i; 778 779 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 780 for_each_hwfn(cdev, i) { 781 if (!cdev->hwfns[i].b_int_requested) 782 break; 783 synchronize_irq(cdev->int_params.msix_table[i].vector); 784 free_irq(cdev->int_params.msix_table[i].vector, 785 cdev->hwfns[i].sp_dpc); 786 } 787 } else { 788 if (QED_LEADING_HWFN(cdev)->b_int_requested) 789 free_irq(cdev->pdev->irq, cdev); 790 } 791 qed_int_disable_post_isr_release(cdev); 792 } 793 794 static int qed_nic_stop(struct qed_dev *cdev) 795 { 796 int i, rc; 797 798 rc = qed_hw_stop(cdev); 799 800 for (i = 0; i < cdev->num_hwfns; i++) { 801 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 802 803 if (p_hwfn->b_sp_dpc_enabled) { 804 tasklet_disable(p_hwfn->sp_dpc); 805 p_hwfn->b_sp_dpc_enabled = false; 806 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 807 "Disabled sp tasklet [hwfn %d] at %p\n", 808 i, p_hwfn->sp_dpc); 809 } 810 } 811 812 qed_dbg_pf_exit(cdev); 813 814 return rc; 815 } 816 817 static int qed_nic_setup(struct qed_dev *cdev) 818 { 819 int rc, i; 820 821 /* Determine if interface is going to require LL2 */ 822 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 823 for (i = 0; i < cdev->num_hwfns; i++) { 824 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 825 826 p_hwfn->using_ll2 = true; 827 } 828 } 829 830 rc = qed_resc_alloc(cdev); 831 if (rc) 832 return rc; 833 834 DP_INFO(cdev, "Allocated qed resources\n"); 835 836 qed_resc_setup(cdev); 837 838 return rc; 839 } 840 841 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 842 { 843 int limit = 0; 844 845 /* Mark the fastpath as free/used */ 846 cdev->int_params.fp_initialized = cnt ? true : false; 847 848 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 849 limit = cdev->num_hwfns * 63; 850 else if (cdev->int_params.fp_msix_cnt) 851 limit = cdev->int_params.fp_msix_cnt; 852 853 if (!limit) 854 return -ENOMEM; 855 856 return min_t(int, cnt, limit); 857 } 858 859 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 860 { 861 memset(info, 0, sizeof(struct qed_int_info)); 862 863 if (!cdev->int_params.fp_initialized) { 864 DP_INFO(cdev, 865 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 866 return -EINVAL; 867 } 868 869 /* Need to expose only MSI-X information; Single IRQ is handled solely 870 * by qed. 
871 */ 872 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 873 int msix_base = cdev->int_params.fp_msix_base; 874 875 info->msix_cnt = cdev->int_params.fp_msix_cnt; 876 info->msix = &cdev->int_params.msix_table[msix_base]; 877 } 878 879 return 0; 880 } 881 882 static int qed_slowpath_setup_int(struct qed_dev *cdev, 883 enum qed_int_mode int_mode) 884 { 885 struct qed_sb_cnt_info sb_cnt_info; 886 int num_l2_queues = 0; 887 int rc; 888 int i; 889 890 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 891 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 892 return -EINVAL; 893 } 894 895 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 896 cdev->int_params.in.int_mode = int_mode; 897 for_each_hwfn(cdev, i) { 898 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 899 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 900 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 901 cdev->int_params.in.num_vectors++; /* slowpath */ 902 } 903 904 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 905 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 906 907 if (is_kdump_kernel()) { 908 DP_INFO(cdev, 909 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 910 cdev->int_params.in.min_msix_cnt); 911 cdev->int_params.in.num_vectors = 912 cdev->int_params.in.min_msix_cnt; 913 } 914 915 rc = qed_set_int_mode(cdev, false); 916 if (rc) { 917 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 918 return rc; 919 } 920 921 cdev->int_params.fp_msix_base = cdev->num_hwfns; 922 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 923 cdev->num_hwfns; 924 925 if (!IS_ENABLED(CONFIG_QED_RDMA) || 926 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 927 return 0; 928 929 for_each_hwfn(cdev, i) 930 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 931 932 DP_VERBOSE(cdev, QED_MSG_RDMA, 933 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 934 cdev->int_params.fp_msix_cnt, num_l2_queues); 935 936 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 937 cdev->int_params.rdma_msix_cnt = 938 (cdev->int_params.fp_msix_cnt - num_l2_queues) 939 / cdev->num_hwfns; 940 cdev->int_params.rdma_msix_base = 941 cdev->int_params.fp_msix_base + num_l2_queues; 942 cdev->int_params.fp_msix_cnt = num_l2_queues; 943 } else { 944 cdev->int_params.rdma_msix_cnt = 0; 945 } 946 947 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 948 cdev->int_params.rdma_msix_cnt, 949 cdev->int_params.rdma_msix_base); 950 951 return 0; 952 } 953 954 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 955 { 956 int rc; 957 958 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 959 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 960 961 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 962 &cdev->int_params.in.num_vectors); 963 if (cdev->num_hwfns > 1) { 964 u8 vectors = 0; 965 966 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 967 cdev->int_params.in.num_vectors += vectors; 968 } 969 970 /* We want a minimum of one fastpath vector per vf hwfn */ 971 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 972 973 rc = qed_set_int_mode(cdev, true); 974 if (rc) 975 return rc; 976 977 cdev->int_params.fp_msix_base = 0; 978 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 979 980 return 0; 981 } 982 983 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 984 u8 *input_buf, u32 max_size, u8 *unzip_buf) 985 { 986 int rc; 987 988 p_hwfn->stream->next_in = input_buf; 989 p_hwfn->stream->avail_in = 
input_len; 990 p_hwfn->stream->next_out = unzip_buf; 991 p_hwfn->stream->avail_out = max_size; 992 993 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 994 995 if (rc != Z_OK) { 996 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 997 rc); 998 return 0; 999 } 1000 1001 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 1002 zlib_inflateEnd(p_hwfn->stream); 1003 1004 if (rc != Z_OK && rc != Z_STREAM_END) { 1005 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 1006 p_hwfn->stream->msg, rc); 1007 return 0; 1008 } 1009 1010 return p_hwfn->stream->total_out / 4; 1011 } 1012 1013 static int qed_alloc_stream_mem(struct qed_dev *cdev) 1014 { 1015 int i; 1016 void *workspace; 1017 1018 for_each_hwfn(cdev, i) { 1019 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1020 1021 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 1022 if (!p_hwfn->stream) 1023 return -ENOMEM; 1024 1025 workspace = vzalloc(zlib_inflate_workspacesize()); 1026 if (!workspace) 1027 return -ENOMEM; 1028 p_hwfn->stream->workspace = workspace; 1029 } 1030 1031 return 0; 1032 } 1033 1034 static void qed_free_stream_mem(struct qed_dev *cdev) 1035 { 1036 int i; 1037 1038 for_each_hwfn(cdev, i) { 1039 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1040 1041 if (!p_hwfn->stream) 1042 return; 1043 1044 vfree(p_hwfn->stream->workspace); 1045 kfree(p_hwfn->stream); 1046 } 1047 } 1048 1049 static void qed_update_pf_params(struct qed_dev *cdev, 1050 struct qed_pf_params *params) 1051 { 1052 int i; 1053 1054 if (IS_ENABLED(CONFIG_QED_RDMA)) { 1055 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 1056 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 1057 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 1058 /* divide by 3 the MRs to avoid MF ILT overflow */ 1059 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 1060 } 1061 1062 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 1063 params->eth_pf_params.num_arfs_filters = 0; 1064 1065 /* In case we might support RDMA, don't allow qede to be greedy 1066 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 1067 * per hwfn. 
1068 */ 1069 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 1070 u16 *num_cons; 1071 1072 num_cons = ¶ms->eth_pf_params.num_cons; 1073 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 1074 } 1075 1076 for (i = 0; i < cdev->num_hwfns; i++) { 1077 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 1078 1079 p_hwfn->pf_params = *params; 1080 } 1081 } 1082 1083 #define QED_PERIODIC_DB_REC_COUNT 10 1084 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100 1085 #define QED_PERIODIC_DB_REC_INTERVAL \ 1086 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) 1087 #define QED_PERIODIC_DB_REC_WAIT_COUNT 10 1088 #define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ 1089 (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) 1090 1091 static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, 1092 enum qed_slowpath_wq_flag wq_flag, 1093 unsigned long delay) 1094 { 1095 if (!hwfn->slowpath_wq_active) 1096 return -EINVAL; 1097 1098 /* Memory barrier for setting atomic bit */ 1099 smp_mb__before_atomic(); 1100 set_bit(wq_flag, &hwfn->slowpath_task_flags); 1101 smp_mb__after_atomic(); 1102 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); 1103 1104 return 0; 1105 } 1106 1107 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) 1108 { 1109 /* Reset periodic Doorbell Recovery counter */ 1110 p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; 1111 1112 /* Don't schedule periodic Doorbell Recovery if already scheduled */ 1113 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1114 &p_hwfn->slowpath_task_flags)) 1115 return; 1116 1117 qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, 1118 QED_PERIODIC_DB_REC_INTERVAL); 1119 } 1120 1121 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 1122 { 1123 int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; 1124 1125 if (IS_VF(cdev)) 1126 return; 1127 1128 for_each_hwfn(cdev, i) { 1129 if (!cdev->hwfns[i].slowpath_wq) 1130 continue; 1131 1132 /* Stop queuing new delayed works */ 1133 cdev->hwfns[i].slowpath_wq_active = false; 1134 1135 /* Wait until the last periodic doorbell recovery is executed */ 1136 while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1137 &cdev->hwfns[i].slowpath_task_flags) && 1138 sleep_count--) 1139 msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); 1140 1141 flush_workqueue(cdev->hwfns[i].slowpath_wq); 1142 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 1143 } 1144 } 1145 1146 static void qed_slowpath_task(struct work_struct *work) 1147 { 1148 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 1149 slowpath_task.work); 1150 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 1151 1152 if (!ptt) { 1153 if (hwfn->slowpath_wq_active) 1154 queue_delayed_work(hwfn->slowpath_wq, 1155 &hwfn->slowpath_task, 0); 1156 1157 return; 1158 } 1159 1160 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 1161 &hwfn->slowpath_task_flags)) 1162 qed_mfw_process_tlv_req(hwfn, ptt); 1163 1164 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, 1165 &hwfn->slowpath_task_flags)) { 1166 qed_db_rec_handler(hwfn, ptt); 1167 if (hwfn->periodic_db_rec_count--) 1168 qed_slowpath_delayed_work(hwfn, 1169 QED_SLOWPATH_PERIODIC_DB_REC, 1170 QED_PERIODIC_DB_REC_INTERVAL); 1171 } 1172 1173 qed_ptt_release(hwfn, ptt); 1174 } 1175 1176 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1177 { 1178 struct qed_hwfn *hwfn; 1179 char name[NAME_SIZE]; 1180 int i; 1181 1182 if (IS_VF(cdev)) 1183 return 0; 1184 1185 for_each_hwfn(cdev, i) { 1186 hwfn = &cdev->hwfns[i]; 1187 1188 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1189 cdev->pdev->bus->number, 
1190 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1191 1192 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1193 if (!hwfn->slowpath_wq) { 1194 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1195 return -ENOMEM; 1196 } 1197 1198 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1199 hwfn->slowpath_wq_active = true; 1200 } 1201 1202 return 0; 1203 } 1204 1205 static int qed_slowpath_start(struct qed_dev *cdev, 1206 struct qed_slowpath_params *params) 1207 { 1208 struct qed_drv_load_params drv_load_params; 1209 struct qed_hw_init_params hw_init_params; 1210 struct qed_mcp_drv_version drv_version; 1211 struct qed_tunnel_info tunn_info; 1212 const u8 *data = NULL; 1213 struct qed_hwfn *hwfn; 1214 struct qed_ptt *p_ptt; 1215 int rc = -EINVAL; 1216 1217 if (qed_iov_wq_start(cdev)) 1218 goto err; 1219 1220 if (qed_slowpath_wq_start(cdev)) 1221 goto err; 1222 1223 if (IS_PF(cdev)) { 1224 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1225 &cdev->pdev->dev); 1226 if (rc) { 1227 DP_NOTICE(cdev, 1228 "Failed to find fw file - /lib/firmware/%s\n", 1229 QED_FW_FILE_NAME); 1230 goto err; 1231 } 1232 1233 if (cdev->num_hwfns == 1) { 1234 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1235 if (p_ptt) { 1236 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1237 } else { 1238 DP_NOTICE(cdev, 1239 "Failed to acquire PTT for aRFS\n"); 1240 goto err; 1241 } 1242 } 1243 } 1244 1245 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1246 rc = qed_nic_setup(cdev); 1247 if (rc) 1248 goto err; 1249 1250 if (IS_PF(cdev)) 1251 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1252 else 1253 rc = qed_slowpath_vf_setup_int(cdev); 1254 if (rc) 1255 goto err1; 1256 1257 if (IS_PF(cdev)) { 1258 /* Allocate stream for unzipping */ 1259 rc = qed_alloc_stream_mem(cdev); 1260 if (rc) 1261 goto err2; 1262 1263 /* First Dword used to differentiate between various sources */ 1264 data = cdev->firmware->data + sizeof(u32); 1265 1266 qed_dbg_pf_init(cdev); 1267 } 1268 1269 /* Start the slowpath */ 1270 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1271 memset(&tunn_info, 0, sizeof(tunn_info)); 1272 tunn_info.vxlan.b_mode_enabled = true; 1273 tunn_info.l2_gre.b_mode_enabled = true; 1274 tunn_info.ip_gre.b_mode_enabled = true; 1275 tunn_info.l2_geneve.b_mode_enabled = true; 1276 tunn_info.ip_geneve.b_mode_enabled = true; 1277 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1278 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1279 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1280 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1281 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1282 hw_init_params.p_tunn = &tunn_info; 1283 hw_init_params.b_hw_start = true; 1284 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1285 hw_init_params.allow_npar_tx_switch = true; 1286 hw_init_params.bin_fw_data = data; 1287 1288 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1289 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1290 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1291 drv_load_params.avoid_eng_reset = false; 1292 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1293 hw_init_params.p_drv_load_params = &drv_load_params; 1294 1295 rc = qed_hw_init(cdev, &hw_init_params); 1296 if (rc) 1297 goto err2; 1298 1299 DP_INFO(cdev, 1300 "HW initialization and function start completed successfully\n"); 1301 1302 if (IS_PF(cdev)) { 1303 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1304 BIT(QED_MODE_L2GENEVE_TUNN) | 1305 
BIT(QED_MODE_IPGENEVE_TUNN) | 1306 BIT(QED_MODE_L2GRE_TUNN) | 1307 BIT(QED_MODE_IPGRE_TUNN)); 1308 } 1309 1310 /* Allocate LL2 interface if needed */ 1311 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1312 rc = qed_ll2_alloc_if(cdev); 1313 if (rc) 1314 goto err3; 1315 } 1316 if (IS_PF(cdev)) { 1317 hwfn = QED_LEADING_HWFN(cdev); 1318 drv_version.version = (params->drv_major << 24) | 1319 (params->drv_minor << 16) | 1320 (params->drv_rev << 8) | 1321 (params->drv_eng); 1322 strlcpy(drv_version.name, params->name, 1323 MCP_DRV_VER_STR_SIZE - 4); 1324 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1325 &drv_version); 1326 if (rc) { 1327 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1328 return rc; 1329 } 1330 } 1331 1332 qed_reset_vport_stats(cdev); 1333 1334 return 0; 1335 1336 err3: 1337 qed_hw_stop(cdev); 1338 err2: 1339 qed_hw_timers_stop_all(cdev); 1340 if (IS_PF(cdev)) 1341 qed_slowpath_irq_free(cdev); 1342 qed_free_stream_mem(cdev); 1343 qed_disable_msix(cdev); 1344 err1: 1345 qed_resc_free(cdev); 1346 err: 1347 if (IS_PF(cdev)) 1348 release_firmware(cdev->firmware); 1349 1350 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1351 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1352 qed_ptt_release(QED_LEADING_HWFN(cdev), 1353 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1354 1355 qed_iov_wq_stop(cdev, false); 1356 1357 qed_slowpath_wq_stop(cdev); 1358 1359 return rc; 1360 } 1361 1362 static int qed_slowpath_stop(struct qed_dev *cdev) 1363 { 1364 if (!cdev) 1365 return -ENODEV; 1366 1367 qed_slowpath_wq_stop(cdev); 1368 1369 qed_ll2_dealloc_if(cdev); 1370 1371 if (IS_PF(cdev)) { 1372 if (cdev->num_hwfns == 1) 1373 qed_ptt_release(QED_LEADING_HWFN(cdev), 1374 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1375 qed_free_stream_mem(cdev); 1376 if (IS_QED_ETH_IF(cdev)) 1377 qed_sriov_disable(cdev, true); 1378 } 1379 1380 qed_nic_stop(cdev); 1381 1382 if (IS_PF(cdev)) 1383 qed_slowpath_irq_free(cdev); 1384 1385 qed_disable_msix(cdev); 1386 1387 qed_resc_free(cdev); 1388 1389 qed_iov_wq_stop(cdev, true); 1390 1391 if (IS_PF(cdev)) 1392 release_firmware(cdev->firmware); 1393 1394 return 0; 1395 } 1396 1397 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1398 { 1399 int i; 1400 1401 memcpy(cdev->name, name, NAME_SIZE); 1402 for_each_hwfn(cdev, i) 1403 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1404 } 1405 1406 static u32 qed_sb_init(struct qed_dev *cdev, 1407 struct qed_sb_info *sb_info, 1408 void *sb_virt_addr, 1409 dma_addr_t sb_phy_addr, u16 sb_id, 1410 enum qed_sb_type type) 1411 { 1412 struct qed_hwfn *p_hwfn; 1413 struct qed_ptt *p_ptt; 1414 u16 rel_sb_id; 1415 u32 rc; 1416 1417 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1418 if (type == QED_SB_TYPE_L2_QUEUE) { 1419 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1420 rel_sb_id = sb_id / cdev->num_hwfns; 1421 } else { 1422 p_hwfn = QED_AFFIN_HWFN(cdev); 1423 rel_sb_id = sb_id; 1424 } 1425 1426 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1427 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1428 IS_LEAD_HWFN(p_hwfn) ? 
0 : 1, rel_sb_id, sb_id); 1429 1430 if (IS_PF(p_hwfn->cdev)) { 1431 p_ptt = qed_ptt_acquire(p_hwfn); 1432 if (!p_ptt) 1433 return -EBUSY; 1434 1435 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1436 sb_phy_addr, rel_sb_id); 1437 qed_ptt_release(p_hwfn, p_ptt); 1438 } else { 1439 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1440 sb_phy_addr, rel_sb_id); 1441 } 1442 1443 return rc; 1444 } 1445 1446 static u32 qed_sb_release(struct qed_dev *cdev, 1447 struct qed_sb_info *sb_info, 1448 u16 sb_id, 1449 enum qed_sb_type type) 1450 { 1451 struct qed_hwfn *p_hwfn; 1452 u16 rel_sb_id; 1453 u32 rc; 1454 1455 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */ 1456 if (type == QED_SB_TYPE_L2_QUEUE) { 1457 p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns]; 1458 rel_sb_id = sb_id / cdev->num_hwfns; 1459 } else { 1460 p_hwfn = QED_AFFIN_HWFN(cdev); 1461 rel_sb_id = sb_id; 1462 } 1463 1464 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1465 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1466 IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id); 1467 1468 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1469 1470 return rc; 1471 } 1472 1473 static bool qed_can_link_change(struct qed_dev *cdev) 1474 { 1475 return true; 1476 } 1477 1478 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1479 { 1480 struct qed_hwfn *hwfn; 1481 struct qed_mcp_link_params *link_params; 1482 struct qed_ptt *ptt; 1483 u32 sup_caps; 1484 int rc; 1485 1486 if (!cdev) 1487 return -ENODEV; 1488 1489 /* The link should be set only once per PF */ 1490 hwfn = &cdev->hwfns[0]; 1491 1492 /* When VF wants to set link, force it to read the bulletin instead. 1493 * This mimics the PF behavior, where a noitification [both immediate 1494 * and possible later] would be generated when changing properties. 
1495 */ 1496 if (IS_VF(cdev)) { 1497 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1498 return 0; 1499 } 1500 1501 ptt = qed_ptt_acquire(hwfn); 1502 if (!ptt) 1503 return -EBUSY; 1504 1505 link_params = qed_mcp_get_link_params(hwfn); 1506 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1507 link_params->speed.autoneg = params->autoneg; 1508 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1509 link_params->speed.advertised_speeds = 0; 1510 sup_caps = QED_LM_1000baseT_Full_BIT | 1511 QED_LM_1000baseKX_Full_BIT | 1512 QED_LM_1000baseX_Full_BIT; 1513 if (params->adv_speeds & sup_caps) 1514 link_params->speed.advertised_speeds |= 1515 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 1516 sup_caps = QED_LM_10000baseT_Full_BIT | 1517 QED_LM_10000baseKR_Full_BIT | 1518 QED_LM_10000baseKX4_Full_BIT | 1519 QED_LM_10000baseR_FEC_BIT | 1520 QED_LM_10000baseCR_Full_BIT | 1521 QED_LM_10000baseSR_Full_BIT | 1522 QED_LM_10000baseLR_Full_BIT | 1523 QED_LM_10000baseLRM_Full_BIT; 1524 if (params->adv_speeds & sup_caps) 1525 link_params->speed.advertised_speeds |= 1526 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 1527 if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) 1528 link_params->speed.advertised_speeds |= 1529 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 1530 sup_caps = QED_LM_25000baseKR_Full_BIT | 1531 QED_LM_25000baseCR_Full_BIT | 1532 QED_LM_25000baseSR_Full_BIT; 1533 if (params->adv_speeds & sup_caps) 1534 link_params->speed.advertised_speeds |= 1535 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 1536 sup_caps = QED_LM_40000baseLR4_Full_BIT | 1537 QED_LM_40000baseKR4_Full_BIT | 1538 QED_LM_40000baseCR4_Full_BIT | 1539 QED_LM_40000baseSR4_Full_BIT; 1540 if (params->adv_speeds & sup_caps) 1541 link_params->speed.advertised_speeds |= 1542 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 1543 sup_caps = QED_LM_50000baseKR2_Full_BIT | 1544 QED_LM_50000baseCR2_Full_BIT | 1545 QED_LM_50000baseSR2_Full_BIT; 1546 if (params->adv_speeds & sup_caps) 1547 link_params->speed.advertised_speeds |= 1548 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1549 sup_caps = QED_LM_100000baseKR4_Full_BIT | 1550 QED_LM_100000baseSR4_Full_BIT | 1551 QED_LM_100000baseCR4_Full_BIT | 1552 QED_LM_100000baseLR4_ER4_Full_BIT; 1553 if (params->adv_speeds & sup_caps) 1554 link_params->speed.advertised_speeds |= 1555 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 1556 } 1557 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1558 link_params->speed.forced_speed = params->forced_speed; 1559 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1560 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1561 link_params->pause.autoneg = true; 1562 else 1563 link_params->pause.autoneg = false; 1564 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1565 link_params->pause.forced_rx = true; 1566 else 1567 link_params->pause.forced_rx = false; 1568 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1569 link_params->pause.forced_tx = true; 1570 else 1571 link_params->pause.forced_tx = false; 1572 } 1573 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1574 switch (params->loopback_mode) { 1575 case QED_LINK_LOOPBACK_INT_PHY: 1576 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1577 break; 1578 case QED_LINK_LOOPBACK_EXT_PHY: 1579 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1580 break; 1581 case QED_LINK_LOOPBACK_EXT: 1582 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1583 break; 1584 case QED_LINK_LOOPBACK_MAC: 1585 
link_params->loopback_mode = ETH_LOOPBACK_MAC; 1586 break; 1587 default: 1588 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1589 break; 1590 } 1591 } 1592 1593 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1594 memcpy(&link_params->eee, ¶ms->eee, 1595 sizeof(link_params->eee)); 1596 1597 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1598 1599 qed_ptt_release(hwfn, ptt); 1600 1601 return rc; 1602 } 1603 1604 static int qed_get_port_type(u32 media_type) 1605 { 1606 int port_type; 1607 1608 switch (media_type) { 1609 case MEDIA_SFPP_10G_FIBER: 1610 case MEDIA_SFP_1G_FIBER: 1611 case MEDIA_XFP_FIBER: 1612 case MEDIA_MODULE_FIBER: 1613 case MEDIA_KR: 1614 port_type = PORT_FIBRE; 1615 break; 1616 case MEDIA_DA_TWINAX: 1617 port_type = PORT_DA; 1618 break; 1619 case MEDIA_BASE_T: 1620 port_type = PORT_TP; 1621 break; 1622 case MEDIA_NOT_PRESENT: 1623 port_type = PORT_NONE; 1624 break; 1625 case MEDIA_UNSPECIFIED: 1626 default: 1627 port_type = PORT_OTHER; 1628 break; 1629 } 1630 return port_type; 1631 } 1632 1633 static int qed_get_link_data(struct qed_hwfn *hwfn, 1634 struct qed_mcp_link_params *params, 1635 struct qed_mcp_link_state *link, 1636 struct qed_mcp_link_capabilities *link_caps) 1637 { 1638 void *p; 1639 1640 if (!IS_PF(hwfn->cdev)) { 1641 qed_vf_get_link_params(hwfn, params); 1642 qed_vf_get_link_state(hwfn, link); 1643 qed_vf_get_link_caps(hwfn, link_caps); 1644 1645 return 0; 1646 } 1647 1648 p = qed_mcp_get_link_params(hwfn); 1649 if (!p) 1650 return -ENXIO; 1651 memcpy(params, p, sizeof(*params)); 1652 1653 p = qed_mcp_get_link_state(hwfn); 1654 if (!p) 1655 return -ENXIO; 1656 memcpy(link, p, sizeof(*link)); 1657 1658 p = qed_mcp_get_link_capabilities(hwfn); 1659 if (!p) 1660 return -ENXIO; 1661 memcpy(link_caps, p, sizeof(*link_caps)); 1662 1663 return 0; 1664 } 1665 1666 static void qed_fill_link_capability(struct qed_hwfn *hwfn, 1667 struct qed_ptt *ptt, u32 capability, 1668 u32 *if_capability) 1669 { 1670 u32 media_type, tcvr_state, tcvr_type; 1671 u32 speed_mask, board_cfg; 1672 1673 if (qed_mcp_get_media_type(hwfn, ptt, &media_type)) 1674 media_type = MEDIA_UNSPECIFIED; 1675 1676 if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type)) 1677 tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED; 1678 1679 if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask)) 1680 speed_mask = 0xFFFFFFFF; 1681 1682 if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg)) 1683 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED; 1684 1685 DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV, 1686 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n", 1687 media_type, tcvr_state, tcvr_type, speed_mask, board_cfg); 1688 1689 switch (media_type) { 1690 case MEDIA_DA_TWINAX: 1691 *if_capability |= QED_LM_FIBRE_BIT; 1692 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1693 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1694 /* For DAC media multiple speed capabilities are supported*/ 1695 capability = capability & speed_mask; 1696 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1697 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1698 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1699 *if_capability |= QED_LM_10000baseCR_Full_BIT; 1700 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1701 *if_capability |= QED_LM_40000baseCR4_Full_BIT; 1702 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1703 *if_capability |= QED_LM_25000baseCR_Full_BIT; 1704 if (capability & 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1705 *if_capability |= QED_LM_50000baseCR2_Full_BIT; 1706 if (capability & 1707 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1708 *if_capability |= QED_LM_100000baseCR4_Full_BIT; 1709 break; 1710 case MEDIA_BASE_T: 1711 *if_capability |= QED_LM_TP_BIT; 1712 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) { 1713 if (capability & 1714 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1715 *if_capability |= QED_LM_1000baseT_Full_BIT; 1716 } 1717 if (capability & 1718 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1719 *if_capability |= QED_LM_10000baseT_Full_BIT; 1720 } 1721 } 1722 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) { 1723 *if_capability |= QED_LM_FIBRE_BIT; 1724 if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET) 1725 *if_capability |= QED_LM_1000baseT_Full_BIT; 1726 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET) 1727 *if_capability |= QED_LM_10000baseT_Full_BIT; 1728 } 1729 break; 1730 case MEDIA_SFP_1G_FIBER: 1731 case MEDIA_SFPP_10G_FIBER: 1732 case MEDIA_XFP_FIBER: 1733 case MEDIA_MODULE_FIBER: 1734 *if_capability |= QED_LM_FIBRE_BIT; 1735 if (capability & 1736 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) { 1737 if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) || 1738 (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX)) 1739 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1740 } 1741 if (capability & 1742 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) { 1743 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR) 1744 *if_capability |= QED_LM_10000baseSR_Full_BIT; 1745 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR) 1746 *if_capability |= QED_LM_10000baseLR_Full_BIT; 1747 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM) 1748 *if_capability |= QED_LM_10000baseLRM_Full_BIT; 1749 if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER) 1750 *if_capability |= QED_LM_10000baseR_FEC_BIT; 1751 } 1752 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1753 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1754 if (capability & 1755 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) { 1756 if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR) 1757 *if_capability |= QED_LM_25000baseSR_Full_BIT; 1758 } 1759 if (capability & 1760 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) { 1761 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4) 1762 *if_capability |= QED_LM_40000baseLR4_Full_BIT; 1763 if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4) 1764 *if_capability |= QED_LM_40000baseSR4_Full_BIT; 1765 } 1766 if (capability & 1767 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1768 *if_capability |= QED_LM_50000baseKR2_Full_BIT; 1769 if (capability & 1770 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) { 1771 if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4) 1772 *if_capability |= QED_LM_100000baseSR4_Full_BIT; 1773 } 1774 1775 break; 1776 case MEDIA_KR: 1777 *if_capability |= QED_LM_Backplane_BIT; 1778 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1779 *if_capability |= QED_LM_20000baseKR2_Full_BIT; 1780 if (capability & 1781 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1782 *if_capability |= QED_LM_1000baseKX_Full_BIT; 1783 if (capability & 1784 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1785 *if_capability |= QED_LM_10000baseKR_Full_BIT; 1786 if (capability & 1787 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1788 *if_capability |= QED_LM_25000baseKR_Full_BIT; 1789 if (capability & 1790 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1791 *if_capability |= QED_LM_40000baseKR4_Full_BIT; 1792 if (capability & 1793 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1794 *if_capability |= 
QED_LM_50000baseKR2_Full_BIT; 1795 if (capability & 1796 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1797 *if_capability |= QED_LM_100000baseKR4_Full_BIT; 1798 break; 1799 case MEDIA_UNSPECIFIED: 1800 case MEDIA_NOT_PRESENT: 1801 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG, 1802 "Unknown media and transceiver type;\n"); 1803 break; 1804 } 1805 } 1806 1807 static void qed_fill_link(struct qed_hwfn *hwfn, 1808 struct qed_ptt *ptt, 1809 struct qed_link_output *if_link) 1810 { 1811 struct qed_mcp_link_capabilities link_caps; 1812 struct qed_mcp_link_params params; 1813 struct qed_mcp_link_state link; 1814 u32 media_type; 1815 1816 memset(if_link, 0, sizeof(*if_link)); 1817 1818 /* Prepare source inputs */ 1819 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { 1820 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1821 return; 1822 } 1823 1824 /* Set the link parameters to pass to protocol driver */ 1825 if (link.link_up) 1826 if_link->link_up = true; 1827 1828 /* TODO - at the moment assume supported and advertised speed equal */ 1829 if (link_caps.default_speed_autoneg) 1830 if_link->supported_caps |= QED_LM_Autoneg_BIT; 1831 if (params.pause.autoneg || 1832 (params.pause.forced_rx && params.pause.forced_tx)) 1833 if_link->supported_caps |= QED_LM_Asym_Pause_BIT; 1834 if (params.pause.autoneg || params.pause.forced_rx || 1835 params.pause.forced_tx) 1836 if_link->supported_caps |= QED_LM_Pause_BIT; 1837 1838 if_link->advertised_caps = if_link->supported_caps; 1839 if (params.speed.autoneg) 1840 if_link->advertised_caps |= QED_LM_Autoneg_BIT; 1841 else 1842 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; 1843 1844 /* Fill link advertised capability*/ 1845 qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds, 1846 &if_link->advertised_caps); 1847 /* Fill link supported capability*/ 1848 qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities, 1849 &if_link->supported_caps); 1850 1851 if (link.link_up) 1852 if_link->speed = link.speed; 1853 1854 /* TODO - fill duplex properly */ 1855 if_link->duplex = DUPLEX_FULL; 1856 qed_mcp_get_media_type(hwfn, ptt, &media_type); 1857 if_link->port = qed_get_port_type(media_type); 1858 1859 if_link->autoneg = params.speed.autoneg; 1860 1861 if (params.pause.autoneg) 1862 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1863 if (params.pause.forced_rx) 1864 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1865 if (params.pause.forced_tx) 1866 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1867 1868 /* Link partner capabilities */ 1869 if (link.partner_adv_speed & 1870 QED_LINK_PARTNER_SPEED_1G_FD) 1871 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; 1872 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) 1873 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; 1874 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) 1875 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; 1876 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) 1877 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; 1878 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) 1879 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; 1880 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) 1881 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; 1882 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) 1883 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; 1884 1885 if (link.an_complete) 1886 if_link->lp_caps |= QED_LM_Autoneg_BIT; 1887 1888 if (link.partner_adv_pause) 1889 if_link->lp_caps |= QED_LM_Pause_BIT; 1890 if 
(link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 1891 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 1892 if_link->lp_caps |= QED_LM_Asym_Pause_BIT; 1893 1894 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 1895 if_link->eee_supported = false; 1896 } else { 1897 if_link->eee_supported = true; 1898 if_link->eee_active = link.eee_active; 1899 if_link->sup_caps = link_caps.eee_speed_caps; 1900 /* MFW clears adv_caps on eee disable; use configured value */ 1901 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps : 1902 params.eee.adv_caps; 1903 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 1904 if_link->eee.enable = params.eee.enable; 1905 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 1906 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 1907 } 1908 } 1909 1910 static void qed_get_current_link(struct qed_dev *cdev, 1911 struct qed_link_output *if_link) 1912 { 1913 struct qed_hwfn *hwfn; 1914 struct qed_ptt *ptt; 1915 int i; 1916 1917 hwfn = &cdev->hwfns[0]; 1918 if (IS_PF(cdev)) { 1919 ptt = qed_ptt_acquire(hwfn); 1920 if (ptt) { 1921 qed_fill_link(hwfn, ptt, if_link); 1922 qed_ptt_release(hwfn, ptt); 1923 } else { 1924 DP_NOTICE(hwfn, "Failed to fill link; No PTT\n"); 1925 } 1926 } else { 1927 qed_fill_link(hwfn, NULL, if_link); 1928 } 1929 1930 for_each_hwfn(cdev, i) 1931 qed_inform_vf_link_state(&cdev->hwfns[i]); 1932 } 1933 1934 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt) 1935 { 1936 void *cookie = hwfn->cdev->ops_cookie; 1937 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1938 struct qed_link_output if_link; 1939 1940 qed_fill_link(hwfn, ptt, &if_link); 1941 qed_inform_vf_link_state(hwfn); 1942 1943 if (IS_LEAD_HWFN(hwfn) && cookie) 1944 op->link_update(cookie, &if_link); 1945 } 1946 1947 static int qed_drain(struct qed_dev *cdev) 1948 { 1949 struct qed_hwfn *hwfn; 1950 struct qed_ptt *ptt; 1951 int i, rc; 1952 1953 if (IS_VF(cdev)) 1954 return 0; 1955 1956 for_each_hwfn(cdev, i) { 1957 hwfn = &cdev->hwfns[i]; 1958 ptt = qed_ptt_acquire(hwfn); 1959 if (!ptt) { 1960 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 1961 return -EBUSY; 1962 } 1963 rc = qed_mcp_drain(hwfn, ptt); 1964 qed_ptt_release(hwfn, ptt); 1965 if (rc) 1966 return rc; 1967 } 1968 1969 return 0; 1970 } 1971 1972 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 1973 struct qed_nvm_image_att *nvm_image, 1974 u32 *crc) 1975 { 1976 u8 *buf = NULL; 1977 int rc, j; 1978 u32 val; 1979 1980 /* Allocate a buffer for holding the nvram image */ 1981 buf = kzalloc(nvm_image->length, GFP_KERNEL); 1982 if (!buf) 1983 return -ENOMEM; 1984 1985 /* Read image into buffer */ 1986 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 1987 buf, nvm_image->length); 1988 if (rc) { 1989 DP_ERR(cdev, "Failed reading image from nvm\n"); 1990 goto out; 1991 } 1992 1993 /* Convert the buffer into big-endian format (excluding the 1994 * closing 4 bytes of CRC). 1995 */ 1996 for (j = 0; j < nvm_image->length - 4; j += 4) { 1997 val = cpu_to_be32(*(u32 *)&buf[j]); 1998 *(u32 *)&buf[j] = val; 1999 } 2000 2001 /* Calc CRC for the "actual" image buffer, i.e. not including 2002 * the last 4 CRC bytes. 

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                        |
 * 4B  | image_type  |  Options  |  Number of register settings           |
 * 8B  |                            Value                                 |
 * 12B |                            Mask                                  |
 * 16B |                            Offset                                |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}
exit:
	return rc;
}
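
/* Illustrative only: the read-modify-write rule applied per Value/Mask/Offset
 * record by qed_nvm_flash_image_access() above. For example, a current word
 * of 0x11223344 with mask 0x0000ff00 and value 0x0000ab00 yields 0x1122ab44.
 * The helper name is hypothetical.
 */
static u32 __maybe_unused qed_nvm_apply_mask(u32 cur_value, u32 value, u32 mask)
{
	return (value & mask) | (cur_value & ~mask);
}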

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                        |
 * 4B  | b'0: check_response?   |          b'1-31 reserved                |
 * 8B  | File-type               |          reserved                      |
 * 12B |                    Image length in bytes                         |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                        |
 * 4B  |                       Length in bytes                            |
 * 8B  | b'0: check_response?   |          b'1-31 reserved                |
 * 12B |                       Offset in bytes                            |
 * 16B |                       Data ...                                   |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                          |
 * 4B  |                       Length in bytes                            |
 * 8B  | Highest command in this batchfile |           Reserved           |
 * \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
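
/* Putting the pieces together, a minimal batchfile accepted by
 * qed_nvm_flash() below would look roughly like this (dword granularity,
 * values illustrative; the signature value itself is QED_NVM_SIGNATURE):
 *
 *   dword 0: QED_NVM_SIGNATURE           - general header
 *   dword 1: total file length in bytes  - must equal the firmware size
 *   dword 2: highest command index used  - low 16 bits, checked against
 *                                          QED_NVM_FLASH_CMD_NVM_MAX
 *   dword 3 onwards: one or more commands (FILE_START, FILE_DATA or
 *                    NVM_CHANGE) in the per-command formats documented above.
 */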

static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
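
/* Usage note (illustrative, caller-side names hypothetical): qed_nvm_flash()
 * and qed_nvm_get_image() are exported to protocol drivers through the
 * .nvm_flash and .nvm_get_image callbacks of qed_common_ops_pass below.
 * A protocol driver's ethtool flash handler would typically do something
 * along the lines of:
 *
 *	rc = edev->ops->common->nvm_flash(edev->cdev, fw_file_name);
 */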

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
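
/* Illustrative only: the TLV helpers below depend on callbacks that the
 * protocol driver registers in its struct qed_common_cb_ops (names on the
 * caller side are hypothetical):
 *
 *	static struct qed_common_cb_ops demo_cb_ops = {
 *		.get_generic_tlv_data = demo_get_generic_tlv_data,
 *		.get_protocol_tlv_data = demo_get_protocol_tlv_data,
 *	};
 *
 * qed_mfw_fill_tlv_data() returns -EINVAL when either callback is missing.
 */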

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}