1 /* QLogic qed NIC Driver 2 * Copyright (c) 2015-2017 QLogic Corporation 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and /or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 33 #include <linux/stddef.h> 34 #include <linux/pci.h> 35 #include <linux/kernel.h> 36 #include <linux/slab.h> 37 #include <linux/delay.h> 38 #include <asm/byteorder.h> 39 #include <linux/dma-mapping.h> 40 #include <linux/string.h> 41 #include <linux/module.h> 42 #include <linux/interrupt.h> 43 #include <linux/workqueue.h> 44 #include <linux/ethtool.h> 45 #include <linux/etherdevice.h> 46 #include <linux/vmalloc.h> 47 #include <linux/crash_dump.h> 48 #include <linux/crc32.h> 49 #include <linux/qed/qed_if.h> 50 #include <linux/qed/qed_ll2_if.h> 51 52 #include "qed.h" 53 #include "qed_sriov.h" 54 #include "qed_sp.h" 55 #include "qed_dev_api.h" 56 #include "qed_ll2.h" 57 #include "qed_fcoe.h" 58 #include "qed_iscsi.h" 59 60 #include "qed_mcp.h" 61 #include "qed_hw.h" 62 #include "qed_selftest.h" 63 #include "qed_debug.h" 64 65 #define QED_ROCE_QPS (8192) 66 #define QED_ROCE_DPIS (8) 67 #define QED_RDMA_SRQS QED_ROCE_QPS 68 69 static char version[] = 70 "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; 71 72 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); 73 MODULE_LICENSE("GPL"); 74 MODULE_VERSION(DRV_MODULE_VERSION); 75 76 #define FW_FILE_VERSION \ 77 __stringify(FW_MAJOR_VERSION) "." \ 78 __stringify(FW_MINOR_VERSION) "." \ 79 __stringify(FW_REVISION_VERSION) "." \ 80 __stringify(FW_ENGINEERING_VERSION) 81 82 #define QED_FW_FILE_NAME \ 83 "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin" 84 85 MODULE_FIRMWARE(QED_FW_FILE_NAME); 86 87 static int __init qed_init(void) 88 { 89 pr_info("%s", version); 90 91 return 0; 92 } 93 94 static void __exit qed_cleanup(void) 95 { 96 pr_notice("qed_cleanup called\n"); 97 } 98 99 module_init(qed_init); 100 module_exit(qed_cleanup); 101 102 /* Check if the DMA controller on the machine can properly handle the DMA 103 * addressing required by the device. 
104 */ 105 static int qed_set_coherency_mask(struct qed_dev *cdev) 106 { 107 struct device *dev = &cdev->pdev->dev; 108 109 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) { 110 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) { 111 DP_NOTICE(cdev, 112 "Can't request 64-bit consistent allocations\n"); 113 return -EIO; 114 } 115 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) { 116 DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n"); 117 return -EIO; 118 } 119 120 return 0; 121 } 122 123 static void qed_free_pci(struct qed_dev *cdev) 124 { 125 struct pci_dev *pdev = cdev->pdev; 126 127 if (cdev->doorbells && cdev->db_size) 128 iounmap(cdev->doorbells); 129 if (cdev->regview) 130 iounmap(cdev->regview); 131 if (atomic_read(&pdev->enable_cnt) == 1) 132 pci_release_regions(pdev); 133 134 pci_disable_device(pdev); 135 } 136 137 #define PCI_REVISION_ID_ERROR_VAL 0xff 138 139 /* Performs PCI initializations as well as initializing PCI-related parameters 140 * in the device structrue. Returns 0 in case of success. 141 */ 142 static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev) 143 { 144 u8 rev_id; 145 int rc; 146 147 cdev->pdev = pdev; 148 149 rc = pci_enable_device(pdev); 150 if (rc) { 151 DP_NOTICE(cdev, "Cannot enable PCI device\n"); 152 goto err0; 153 } 154 155 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 156 DP_NOTICE(cdev, "No memory region found in bar #0\n"); 157 rc = -EIO; 158 goto err1; 159 } 160 161 if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 162 DP_NOTICE(cdev, "No memory region found in bar #2\n"); 163 rc = -EIO; 164 goto err1; 165 } 166 167 if (atomic_read(&pdev->enable_cnt) == 1) { 168 rc = pci_request_regions(pdev, "qed"); 169 if (rc) { 170 DP_NOTICE(cdev, 171 "Failed to request PCI memory resources\n"); 172 goto err1; 173 } 174 pci_set_master(pdev); 175 pci_save_state(pdev); 176 } 177 178 pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id); 179 if (rev_id == PCI_REVISION_ID_ERROR_VAL) { 180 DP_NOTICE(cdev, 181 "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. 
Aborting.\n", 182 rev_id); 183 rc = -ENODEV; 184 goto err2; 185 } 186 if (!pci_is_pcie(pdev)) { 187 DP_NOTICE(cdev, "The bus is not PCI Express\n"); 188 rc = -EIO; 189 goto err2; 190 } 191 192 cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 193 if (IS_PF(cdev) && !cdev->pci_params.pm_cap) 194 DP_NOTICE(cdev, "Cannot find power management capability\n"); 195 196 rc = qed_set_coherency_mask(cdev); 197 if (rc) 198 goto err2; 199 200 cdev->pci_params.mem_start = pci_resource_start(pdev, 0); 201 cdev->pci_params.mem_end = pci_resource_end(pdev, 0); 202 cdev->pci_params.irq = pdev->irq; 203 204 cdev->regview = pci_ioremap_bar(pdev, 0); 205 if (!cdev->regview) { 206 DP_NOTICE(cdev, "Cannot map register space, aborting\n"); 207 rc = -ENOMEM; 208 goto err2; 209 } 210 211 cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2); 212 cdev->db_size = pci_resource_len(cdev->pdev, 2); 213 if (!cdev->db_size) { 214 if (IS_PF(cdev)) { 215 DP_NOTICE(cdev, "No Doorbell bar available\n"); 216 return -EINVAL; 217 } else { 218 return 0; 219 } 220 } 221 222 cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size); 223 224 if (!cdev->doorbells) { 225 DP_NOTICE(cdev, "Cannot map doorbell space\n"); 226 return -ENOMEM; 227 } 228 229 return 0; 230 231 err2: 232 pci_release_regions(pdev); 233 err1: 234 pci_disable_device(pdev); 235 err0: 236 return rc; 237 } 238 239 int qed_fill_dev_info(struct qed_dev *cdev, 240 struct qed_dev_info *dev_info) 241 { 242 struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev); 243 struct qed_hw_info *hw_info = &p_hwfn->hw_info; 244 struct qed_tunnel_info *tun = &cdev->tunnel; 245 struct qed_ptt *ptt; 246 247 memset(dev_info, 0, sizeof(struct qed_dev_info)); 248 249 if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 250 tun->vxlan.b_mode_enabled) 251 dev_info->vxlan_enable = true; 252 253 if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled && 254 tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 255 tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 256 dev_info->gre_enable = true; 257 258 if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled && 259 tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN && 260 tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN) 261 dev_info->geneve_enable = true; 262 263 dev_info->num_hwfns = cdev->num_hwfns; 264 dev_info->pci_mem_start = cdev->pci_params.mem_start; 265 dev_info->pci_mem_end = cdev->pci_params.mem_end; 266 dev_info->pci_irq = cdev->pci_params.irq; 267 dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn); 268 dev_info->dev_type = cdev->type; 269 ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr); 270 271 if (IS_PF(cdev)) { 272 dev_info->fw_major = FW_MAJOR_VERSION; 273 dev_info->fw_minor = FW_MINOR_VERSION; 274 dev_info->fw_rev = FW_REVISION_VERSION; 275 dev_info->fw_eng = FW_ENGINEERING_VERSION; 276 dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, 277 &cdev->mf_bits); 278 dev_info->tx_switching = true; 279 280 if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) 281 dev_info->wol_support = true; 282 283 dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id; 284 } else { 285 qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major, 286 &dev_info->fw_minor, &dev_info->fw_rev, 287 &dev_info->fw_eng); 288 } 289 290 if (IS_PF(cdev)) { 291 ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 292 if (ptt) { 293 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt, 294 &dev_info->mfw_rev, NULL); 295 296 qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt, 297 &dev_info->mbi_version); 298 299 
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt, 300 &dev_info->flash_size); 301 302 qed_ptt_release(QED_LEADING_HWFN(cdev), ptt); 303 } 304 } else { 305 qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL, 306 &dev_info->mfw_rev, NULL); 307 } 308 309 dev_info->mtu = hw_info->mtu; 310 311 return 0; 312 } 313 314 static void qed_free_cdev(struct qed_dev *cdev) 315 { 316 kfree((void *)cdev); 317 } 318 319 static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev) 320 { 321 struct qed_dev *cdev; 322 323 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 324 if (!cdev) 325 return cdev; 326 327 qed_init_struct(cdev); 328 329 return cdev; 330 } 331 332 /* Sets the requested power state */ 333 static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state) 334 { 335 if (!cdev) 336 return -ENODEV; 337 338 DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n"); 339 return 0; 340 } 341 342 /* probing */ 343 static struct qed_dev *qed_probe(struct pci_dev *pdev, 344 struct qed_probe_params *params) 345 { 346 struct qed_dev *cdev; 347 int rc; 348 349 cdev = qed_alloc_cdev(pdev); 350 if (!cdev) 351 goto err0; 352 353 cdev->drv_type = DRV_ID_DRV_TYPE_LINUX; 354 cdev->protocol = params->protocol; 355 356 if (params->is_vf) 357 cdev->b_is_vf = true; 358 359 qed_init_dp(cdev, params->dp_module, params->dp_level); 360 361 rc = qed_init_pci(cdev, pdev); 362 if (rc) { 363 DP_ERR(cdev, "init pci failed\n"); 364 goto err1; 365 } 366 DP_INFO(cdev, "PCI init completed successfully\n"); 367 368 rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT); 369 if (rc) { 370 DP_ERR(cdev, "hw prepare failed\n"); 371 goto err2; 372 } 373 374 DP_INFO(cdev, "qed_probe completed successfully\n"); 375 376 return cdev; 377 378 err2: 379 qed_free_pci(cdev); 380 err1: 381 qed_free_cdev(cdev); 382 err0: 383 return NULL; 384 } 385 386 static void qed_remove(struct qed_dev *cdev) 387 { 388 if (!cdev) 389 return; 390 391 qed_hw_remove(cdev); 392 393 qed_free_pci(cdev); 394 395 qed_set_power_state(cdev, PCI_D3hot); 396 397 qed_free_cdev(cdev); 398 } 399 400 static void qed_disable_msix(struct qed_dev *cdev) 401 { 402 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 403 pci_disable_msix(cdev->pdev); 404 kfree(cdev->int_params.msix_table); 405 } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) { 406 pci_disable_msi(cdev->pdev); 407 } 408 409 memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param)); 410 } 411 412 static int qed_enable_msix(struct qed_dev *cdev, 413 struct qed_int_params *int_params) 414 { 415 int i, rc, cnt; 416 417 cnt = int_params->in.num_vectors; 418 419 for (i = 0; i < cnt; i++) 420 int_params->msix_table[i].entry = i; 421 422 rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table, 423 int_params->in.min_msix_cnt, cnt); 424 if (rc < cnt && rc >= int_params->in.min_msix_cnt && 425 (rc % cdev->num_hwfns)) { 426 pci_disable_msix(cdev->pdev); 427 428 /* If fastpath is initialized, we need at least one interrupt 429 * per hwfn [and the slow path interrupts]. New requested number 430 * should be a multiple of the number of hwfns. 
431 */ 432 cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns; 433 DP_NOTICE(cdev, 434 "Trying to enable MSI-X with less vectors (%d out of %d)\n", 435 cnt, int_params->in.num_vectors); 436 rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table, 437 cnt); 438 if (!rc) 439 rc = cnt; 440 } 441 442 if (rc > 0) { 443 /* MSI-x configuration was achieved */ 444 int_params->out.int_mode = QED_INT_MODE_MSIX; 445 int_params->out.num_vectors = rc; 446 rc = 0; 447 } else { 448 DP_NOTICE(cdev, 449 "Failed to enable MSI-X [Requested %d vectors][rc %d]\n", 450 cnt, rc); 451 } 452 453 return rc; 454 } 455 456 /* This function outputs the int mode and the number of enabled msix vector */ 457 static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode) 458 { 459 struct qed_int_params *int_params = &cdev->int_params; 460 struct msix_entry *tbl; 461 int rc = 0, cnt; 462 463 switch (int_params->in.int_mode) { 464 case QED_INT_MODE_MSIX: 465 /* Allocate MSIX table */ 466 cnt = int_params->in.num_vectors; 467 int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL); 468 if (!int_params->msix_table) { 469 rc = -ENOMEM; 470 goto out; 471 } 472 473 /* Enable MSIX */ 474 rc = qed_enable_msix(cdev, int_params); 475 if (!rc) 476 goto out; 477 478 DP_NOTICE(cdev, "Failed to enable MSI-X\n"); 479 kfree(int_params->msix_table); 480 if (force_mode) 481 goto out; 482 /* Fallthrough */ 483 484 case QED_INT_MODE_MSI: 485 if (cdev->num_hwfns == 1) { 486 rc = pci_enable_msi(cdev->pdev); 487 if (!rc) { 488 int_params->out.int_mode = QED_INT_MODE_MSI; 489 goto out; 490 } 491 492 DP_NOTICE(cdev, "Failed to enable MSI\n"); 493 if (force_mode) 494 goto out; 495 } 496 /* Fallthrough */ 497 498 case QED_INT_MODE_INTA: 499 int_params->out.int_mode = QED_INT_MODE_INTA; 500 rc = 0; 501 goto out; 502 default: 503 DP_NOTICE(cdev, "Unknown int_mode value %d\n", 504 int_params->in.int_mode); 505 rc = -EINVAL; 506 } 507 508 out: 509 if (!rc) 510 DP_INFO(cdev, "Using %s interrupts\n", 511 int_params->out.int_mode == QED_INT_MODE_INTA ? 512 "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ? 
513 "MSI" : "MSIX"); 514 cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE; 515 516 return rc; 517 } 518 519 static void qed_simd_handler_config(struct qed_dev *cdev, void *token, 520 int index, void(*handler)(void *)) 521 { 522 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 523 int relative_idx = index / cdev->num_hwfns; 524 525 hwfn->simd_proto_handler[relative_idx].func = handler; 526 hwfn->simd_proto_handler[relative_idx].token = token; 527 } 528 529 static void qed_simd_handler_clean(struct qed_dev *cdev, int index) 530 { 531 struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns]; 532 int relative_idx = index / cdev->num_hwfns; 533 534 memset(&hwfn->simd_proto_handler[relative_idx], 0, 535 sizeof(struct qed_simd_fp_handler)); 536 } 537 538 static irqreturn_t qed_msix_sp_int(int irq, void *tasklet) 539 { 540 tasklet_schedule((struct tasklet_struct *)tasklet); 541 return IRQ_HANDLED; 542 } 543 544 static irqreturn_t qed_single_int(int irq, void *dev_instance) 545 { 546 struct qed_dev *cdev = (struct qed_dev *)dev_instance; 547 struct qed_hwfn *hwfn; 548 irqreturn_t rc = IRQ_NONE; 549 u64 status; 550 int i, j; 551 552 for (i = 0; i < cdev->num_hwfns; i++) { 553 status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]); 554 555 if (!status) 556 continue; 557 558 hwfn = &cdev->hwfns[i]; 559 560 /* Slowpath interrupt */ 561 if (unlikely(status & 0x1)) { 562 tasklet_schedule(hwfn->sp_dpc); 563 status &= ~0x1; 564 rc = IRQ_HANDLED; 565 } 566 567 /* Fastpath interrupts */ 568 for (j = 0; j < 64; j++) { 569 if ((0x2ULL << j) & status) { 570 struct qed_simd_fp_handler *p_handler = 571 &hwfn->simd_proto_handler[j]; 572 573 if (p_handler->func) 574 p_handler->func(p_handler->token); 575 else 576 DP_NOTICE(hwfn, 577 "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n", 578 j, status); 579 580 status &= ~(0x2ULL << j); 581 rc = IRQ_HANDLED; 582 } 583 } 584 585 if (unlikely(status)) 586 DP_VERBOSE(hwfn, NETIF_MSG_INTR, 587 "got an unknown interrupt status 0x%llx\n", 588 status); 589 } 590 591 return rc; 592 } 593 594 int qed_slowpath_irq_req(struct qed_hwfn *hwfn) 595 { 596 struct qed_dev *cdev = hwfn->cdev; 597 u32 int_mode; 598 int rc = 0; 599 u8 id; 600 601 int_mode = cdev->int_params.out.int_mode; 602 if (int_mode == QED_INT_MODE_MSIX) { 603 id = hwfn->my_id; 604 snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x", 605 id, cdev->pdev->bus->number, 606 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 607 rc = request_irq(cdev->int_params.msix_table[id].vector, 608 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc); 609 } else { 610 unsigned long flags = 0; 611 612 snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x", 613 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn), 614 PCI_FUNC(cdev->pdev->devfn)); 615 616 if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA) 617 flags |= IRQF_SHARED; 618 619 rc = request_irq(cdev->pdev->irq, qed_single_int, 620 flags, cdev->name, cdev); 621 } 622 623 if (rc) 624 DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc); 625 else 626 DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP), 627 "Requested slowpath %s\n", 628 (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ"); 629 630 return rc; 631 } 632 633 static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn) 634 { 635 /* Calling the disable function will make sure that any 636 * currently-running function is completed. The following call to the 637 * enable function makes this sequence a flush-like operation. 
638 */ 639 if (p_hwfn->b_sp_dpc_enabled) { 640 tasklet_disable(p_hwfn->sp_dpc); 641 tasklet_enable(p_hwfn->sp_dpc); 642 } 643 } 644 645 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn) 646 { 647 struct qed_dev *cdev = p_hwfn->cdev; 648 u8 id = p_hwfn->my_id; 649 u32 int_mode; 650 651 int_mode = cdev->int_params.out.int_mode; 652 if (int_mode == QED_INT_MODE_MSIX) 653 synchronize_irq(cdev->int_params.msix_table[id].vector); 654 else 655 synchronize_irq(cdev->pdev->irq); 656 657 qed_slowpath_tasklet_flush(p_hwfn); 658 } 659 660 static void qed_slowpath_irq_free(struct qed_dev *cdev) 661 { 662 int i; 663 664 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 665 for_each_hwfn(cdev, i) { 666 if (!cdev->hwfns[i].b_int_requested) 667 break; 668 synchronize_irq(cdev->int_params.msix_table[i].vector); 669 free_irq(cdev->int_params.msix_table[i].vector, 670 cdev->hwfns[i].sp_dpc); 671 } 672 } else { 673 if (QED_LEADING_HWFN(cdev)->b_int_requested) 674 free_irq(cdev->pdev->irq, cdev); 675 } 676 qed_int_disable_post_isr_release(cdev); 677 } 678 679 static int qed_nic_stop(struct qed_dev *cdev) 680 { 681 int i, rc; 682 683 rc = qed_hw_stop(cdev); 684 685 for (i = 0; i < cdev->num_hwfns; i++) { 686 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 687 688 if (p_hwfn->b_sp_dpc_enabled) { 689 tasklet_disable(p_hwfn->sp_dpc); 690 p_hwfn->b_sp_dpc_enabled = false; 691 DP_VERBOSE(cdev, NETIF_MSG_IFDOWN, 692 "Disabled sp tasklet [hwfn %d] at %p\n", 693 i, p_hwfn->sp_dpc); 694 } 695 } 696 697 qed_dbg_pf_exit(cdev); 698 699 return rc; 700 } 701 702 static int qed_nic_setup(struct qed_dev *cdev) 703 { 704 int rc, i; 705 706 /* Determine if interface is going to require LL2 */ 707 if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) { 708 for (i = 0; i < cdev->num_hwfns; i++) { 709 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 710 711 p_hwfn->using_ll2 = true; 712 } 713 } 714 715 rc = qed_resc_alloc(cdev); 716 if (rc) 717 return rc; 718 719 DP_INFO(cdev, "Allocated qed resources\n"); 720 721 qed_resc_setup(cdev); 722 723 return rc; 724 } 725 726 static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt) 727 { 728 int limit = 0; 729 730 /* Mark the fastpath as free/used */ 731 cdev->int_params.fp_initialized = cnt ? true : false; 732 733 if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX) 734 limit = cdev->num_hwfns * 63; 735 else if (cdev->int_params.fp_msix_cnt) 736 limit = cdev->int_params.fp_msix_cnt; 737 738 if (!limit) 739 return -ENOMEM; 740 741 return min_t(int, cnt, limit); 742 } 743 744 static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info) 745 { 746 memset(info, 0, sizeof(struct qed_int_info)); 747 748 if (!cdev->int_params.fp_initialized) { 749 DP_INFO(cdev, 750 "Protocol driver requested interrupt information, but its support is not yet configured\n"); 751 return -EINVAL; 752 } 753 754 /* Need to expose only MSI-X information; Single IRQ is handled solely 755 * by qed. 
756 */ 757 if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) { 758 int msix_base = cdev->int_params.fp_msix_base; 759 760 info->msix_cnt = cdev->int_params.fp_msix_cnt; 761 info->msix = &cdev->int_params.msix_table[msix_base]; 762 } 763 764 return 0; 765 } 766 767 static int qed_slowpath_setup_int(struct qed_dev *cdev, 768 enum qed_int_mode int_mode) 769 { 770 struct qed_sb_cnt_info sb_cnt_info; 771 int num_l2_queues = 0; 772 int rc; 773 int i; 774 775 if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) { 776 DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n"); 777 return -EINVAL; 778 } 779 780 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 781 cdev->int_params.in.int_mode = int_mode; 782 for_each_hwfn(cdev, i) { 783 memset(&sb_cnt_info, 0, sizeof(sb_cnt_info)); 784 qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info); 785 cdev->int_params.in.num_vectors += sb_cnt_info.cnt; 786 cdev->int_params.in.num_vectors++; /* slowpath */ 787 } 788 789 /* We want a minimum of one slowpath and one fastpath vector per hwfn */ 790 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2; 791 792 if (is_kdump_kernel()) { 793 DP_INFO(cdev, 794 "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n", 795 cdev->int_params.in.min_msix_cnt); 796 cdev->int_params.in.num_vectors = 797 cdev->int_params.in.min_msix_cnt; 798 } 799 800 rc = qed_set_int_mode(cdev, false); 801 if (rc) { 802 DP_ERR(cdev, "qed_slowpath_setup_int ERR\n"); 803 return rc; 804 } 805 806 cdev->int_params.fp_msix_base = cdev->num_hwfns; 807 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors - 808 cdev->num_hwfns; 809 810 if (!IS_ENABLED(CONFIG_QED_RDMA) || 811 !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) 812 return 0; 813 814 for_each_hwfn(cdev, i) 815 num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE); 816 817 DP_VERBOSE(cdev, QED_MSG_RDMA, 818 "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n", 819 cdev->int_params.fp_msix_cnt, num_l2_queues); 820 821 if (cdev->int_params.fp_msix_cnt > num_l2_queues) { 822 cdev->int_params.rdma_msix_cnt = 823 (cdev->int_params.fp_msix_cnt - num_l2_queues) 824 / cdev->num_hwfns; 825 cdev->int_params.rdma_msix_base = 826 cdev->int_params.fp_msix_base + num_l2_queues; 827 cdev->int_params.fp_msix_cnt = num_l2_queues; 828 } else { 829 cdev->int_params.rdma_msix_cnt = 0; 830 } 831 832 DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n", 833 cdev->int_params.rdma_msix_cnt, 834 cdev->int_params.rdma_msix_base); 835 836 return 0; 837 } 838 839 static int qed_slowpath_vf_setup_int(struct qed_dev *cdev) 840 { 841 int rc; 842 843 memset(&cdev->int_params, 0, sizeof(struct qed_int_params)); 844 cdev->int_params.in.int_mode = QED_INT_MODE_MSIX; 845 846 qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), 847 &cdev->int_params.in.num_vectors); 848 if (cdev->num_hwfns > 1) { 849 u8 vectors = 0; 850 851 qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors); 852 cdev->int_params.in.num_vectors += vectors; 853 } 854 855 /* We want a minimum of one fastpath vector per vf hwfn */ 856 cdev->int_params.in.min_msix_cnt = cdev->num_hwfns; 857 858 rc = qed_set_int_mode(cdev, true); 859 if (rc) 860 return rc; 861 862 cdev->int_params.fp_msix_base = 0; 863 cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors; 864 865 return 0; 866 } 867 868 u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len, 869 u8 *input_buf, u32 max_size, u8 *unzip_buf) 870 { 871 int rc; 872 873 p_hwfn->stream->next_in = input_buf; 874 p_hwfn->stream->avail_in = 
input_len; 875 p_hwfn->stream->next_out = unzip_buf; 876 p_hwfn->stream->avail_out = max_size; 877 878 rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS); 879 880 if (rc != Z_OK) { 881 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n", 882 rc); 883 return 0; 884 } 885 886 rc = zlib_inflate(p_hwfn->stream, Z_FINISH); 887 zlib_inflateEnd(p_hwfn->stream); 888 889 if (rc != Z_OK && rc != Z_STREAM_END) { 890 DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n", 891 p_hwfn->stream->msg, rc); 892 return 0; 893 } 894 895 return p_hwfn->stream->total_out / 4; 896 } 897 898 static int qed_alloc_stream_mem(struct qed_dev *cdev) 899 { 900 int i; 901 void *workspace; 902 903 for_each_hwfn(cdev, i) { 904 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 905 906 p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL); 907 if (!p_hwfn->stream) 908 return -ENOMEM; 909 910 workspace = vzalloc(zlib_inflate_workspacesize()); 911 if (!workspace) 912 return -ENOMEM; 913 p_hwfn->stream->workspace = workspace; 914 } 915 916 return 0; 917 } 918 919 static void qed_free_stream_mem(struct qed_dev *cdev) 920 { 921 int i; 922 923 for_each_hwfn(cdev, i) { 924 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 925 926 if (!p_hwfn->stream) 927 return; 928 929 vfree(p_hwfn->stream->workspace); 930 kfree(p_hwfn->stream); 931 } 932 } 933 934 static void qed_update_pf_params(struct qed_dev *cdev, 935 struct qed_pf_params *params) 936 { 937 int i; 938 939 if (IS_ENABLED(CONFIG_QED_RDMA)) { 940 params->rdma_pf_params.num_qps = QED_ROCE_QPS; 941 params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; 942 params->rdma_pf_params.num_srqs = QED_RDMA_SRQS; 943 /* divide by 3 the MRs to avoid MF ILT overflow */ 944 params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; 945 } 946 947 if (cdev->num_hwfns > 1 || IS_VF(cdev)) 948 params->eth_pf_params.num_arfs_filters = 0; 949 950 /* In case we might support RDMA, don't allow qede to be greedy 951 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp] 952 * per hwfn. 
953 */ 954 if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) { 955 u16 *num_cons; 956 957 num_cons = ¶ms->eth_pf_params.num_cons; 958 *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS); 959 } 960 961 for (i = 0; i < cdev->num_hwfns; i++) { 962 struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; 963 964 p_hwfn->pf_params = *params; 965 } 966 } 967 968 static void qed_slowpath_wq_stop(struct qed_dev *cdev) 969 { 970 int i; 971 972 if (IS_VF(cdev)) 973 return; 974 975 for_each_hwfn(cdev, i) { 976 if (!cdev->hwfns[i].slowpath_wq) 977 continue; 978 979 flush_workqueue(cdev->hwfns[i].slowpath_wq); 980 destroy_workqueue(cdev->hwfns[i].slowpath_wq); 981 } 982 } 983 984 static void qed_slowpath_task(struct work_struct *work) 985 { 986 struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn, 987 slowpath_task.work); 988 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 989 990 if (!ptt) { 991 queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); 992 return; 993 } 994 995 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ, 996 &hwfn->slowpath_task_flags)) 997 qed_mfw_process_tlv_req(hwfn, ptt); 998 999 qed_ptt_release(hwfn, ptt); 1000 } 1001 1002 static int qed_slowpath_wq_start(struct qed_dev *cdev) 1003 { 1004 struct qed_hwfn *hwfn; 1005 char name[NAME_SIZE]; 1006 int i; 1007 1008 if (IS_VF(cdev)) 1009 return 0; 1010 1011 for_each_hwfn(cdev, i) { 1012 hwfn = &cdev->hwfns[i]; 1013 1014 snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x", 1015 cdev->pdev->bus->number, 1016 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id); 1017 1018 hwfn->slowpath_wq = alloc_workqueue(name, 0, 0); 1019 if (!hwfn->slowpath_wq) { 1020 DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n"); 1021 return -ENOMEM; 1022 } 1023 1024 INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); 1025 } 1026 1027 return 0; 1028 } 1029 1030 static int qed_slowpath_start(struct qed_dev *cdev, 1031 struct qed_slowpath_params *params) 1032 { 1033 struct qed_drv_load_params drv_load_params; 1034 struct qed_hw_init_params hw_init_params; 1035 struct qed_mcp_drv_version drv_version; 1036 struct qed_tunnel_info tunn_info; 1037 const u8 *data = NULL; 1038 struct qed_hwfn *hwfn; 1039 struct qed_ptt *p_ptt; 1040 int rc = -EINVAL; 1041 1042 if (qed_iov_wq_start(cdev)) 1043 goto err; 1044 1045 if (qed_slowpath_wq_start(cdev)) 1046 goto err; 1047 1048 if (IS_PF(cdev)) { 1049 rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME, 1050 &cdev->pdev->dev); 1051 if (rc) { 1052 DP_NOTICE(cdev, 1053 "Failed to find fw file - /lib/firmware/%s\n", 1054 QED_FW_FILE_NAME); 1055 goto err; 1056 } 1057 1058 if (cdev->num_hwfns == 1) { 1059 p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev)); 1060 if (p_ptt) { 1061 QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt; 1062 } else { 1063 DP_NOTICE(cdev, 1064 "Failed to acquire PTT for aRFS\n"); 1065 goto err; 1066 } 1067 } 1068 } 1069 1070 cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS; 1071 rc = qed_nic_setup(cdev); 1072 if (rc) 1073 goto err; 1074 1075 if (IS_PF(cdev)) 1076 rc = qed_slowpath_setup_int(cdev, params->int_mode); 1077 else 1078 rc = qed_slowpath_vf_setup_int(cdev); 1079 if (rc) 1080 goto err1; 1081 1082 if (IS_PF(cdev)) { 1083 /* Allocate stream for unzipping */ 1084 rc = qed_alloc_stream_mem(cdev); 1085 if (rc) 1086 goto err2; 1087 1088 /* First Dword used to differentiate between various sources */ 1089 data = cdev->firmware->data + sizeof(u32); 1090 1091 qed_dbg_pf_init(cdev); 1092 } 1093 1094 /* Start the slowpath */ 1095 memset(&hw_init_params, 0, sizeof(hw_init_params)); 1096 memset(&tunn_info, 0, 
sizeof(tunn_info)); 1097 tunn_info.vxlan.b_mode_enabled = true; 1098 tunn_info.l2_gre.b_mode_enabled = true; 1099 tunn_info.ip_gre.b_mode_enabled = true; 1100 tunn_info.l2_geneve.b_mode_enabled = true; 1101 tunn_info.ip_geneve.b_mode_enabled = true; 1102 tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1103 tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1104 tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1105 tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1106 tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN; 1107 hw_init_params.p_tunn = &tunn_info; 1108 hw_init_params.b_hw_start = true; 1109 hw_init_params.int_mode = cdev->int_params.out.int_mode; 1110 hw_init_params.allow_npar_tx_switch = true; 1111 hw_init_params.bin_fw_data = data; 1112 1113 memset(&drv_load_params, 0, sizeof(drv_load_params)); 1114 drv_load_params.is_crash_kernel = is_kdump_kernel(); 1115 drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT; 1116 drv_load_params.avoid_eng_reset = false; 1117 drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE; 1118 hw_init_params.p_drv_load_params = &drv_load_params; 1119 1120 rc = qed_hw_init(cdev, &hw_init_params); 1121 if (rc) 1122 goto err2; 1123 1124 DP_INFO(cdev, 1125 "HW initialization and function start completed successfully\n"); 1126 1127 if (IS_PF(cdev)) { 1128 cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) | 1129 BIT(QED_MODE_L2GENEVE_TUNN) | 1130 BIT(QED_MODE_IPGENEVE_TUNN) | 1131 BIT(QED_MODE_L2GRE_TUNN) | 1132 BIT(QED_MODE_IPGRE_TUNN)); 1133 } 1134 1135 /* Allocate LL2 interface if needed */ 1136 if (QED_LEADING_HWFN(cdev)->using_ll2) { 1137 rc = qed_ll2_alloc_if(cdev); 1138 if (rc) 1139 goto err3; 1140 } 1141 if (IS_PF(cdev)) { 1142 hwfn = QED_LEADING_HWFN(cdev); 1143 drv_version.version = (params->drv_major << 24) | 1144 (params->drv_minor << 16) | 1145 (params->drv_rev << 8) | 1146 (params->drv_eng); 1147 strlcpy(drv_version.name, params->name, 1148 MCP_DRV_VER_STR_SIZE - 4); 1149 rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, 1150 &drv_version); 1151 if (rc) { 1152 DP_NOTICE(cdev, "Failed sending drv version command\n"); 1153 return rc; 1154 } 1155 } 1156 1157 qed_reset_vport_stats(cdev); 1158 1159 return 0; 1160 1161 err3: 1162 qed_hw_stop(cdev); 1163 err2: 1164 qed_hw_timers_stop_all(cdev); 1165 if (IS_PF(cdev)) 1166 qed_slowpath_irq_free(cdev); 1167 qed_free_stream_mem(cdev); 1168 qed_disable_msix(cdev); 1169 err1: 1170 qed_resc_free(cdev); 1171 err: 1172 if (IS_PF(cdev)) 1173 release_firmware(cdev->firmware); 1174 1175 if (IS_PF(cdev) && (cdev->num_hwfns == 1) && 1176 QED_LEADING_HWFN(cdev)->p_arfs_ptt) 1177 qed_ptt_release(QED_LEADING_HWFN(cdev), 1178 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1179 1180 qed_iov_wq_stop(cdev, false); 1181 1182 qed_slowpath_wq_stop(cdev); 1183 1184 return rc; 1185 } 1186 1187 static int qed_slowpath_stop(struct qed_dev *cdev) 1188 { 1189 if (!cdev) 1190 return -ENODEV; 1191 1192 qed_slowpath_wq_stop(cdev); 1193 1194 qed_ll2_dealloc_if(cdev); 1195 1196 if (IS_PF(cdev)) { 1197 if (cdev->num_hwfns == 1) 1198 qed_ptt_release(QED_LEADING_HWFN(cdev), 1199 QED_LEADING_HWFN(cdev)->p_arfs_ptt); 1200 qed_free_stream_mem(cdev); 1201 if (IS_QED_ETH_IF(cdev)) 1202 qed_sriov_disable(cdev, true); 1203 } 1204 1205 qed_nic_stop(cdev); 1206 1207 if (IS_PF(cdev)) 1208 qed_slowpath_irq_free(cdev); 1209 1210 qed_disable_msix(cdev); 1211 1212 qed_resc_free(cdev); 1213 1214 qed_iov_wq_stop(cdev, true); 1215 1216 if (IS_PF(cdev)) 1217 release_firmware(cdev->firmware); 1218 1219 return 0; 1220 } 1221 
1222 static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE]) 1223 { 1224 int i; 1225 1226 memcpy(cdev->name, name, NAME_SIZE); 1227 for_each_hwfn(cdev, i) 1228 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 1229 } 1230 1231 static u32 qed_sb_init(struct qed_dev *cdev, 1232 struct qed_sb_info *sb_info, 1233 void *sb_virt_addr, 1234 dma_addr_t sb_phy_addr, u16 sb_id, 1235 enum qed_sb_type type) 1236 { 1237 struct qed_hwfn *p_hwfn; 1238 struct qed_ptt *p_ptt; 1239 int hwfn_index; 1240 u16 rel_sb_id; 1241 u8 n_hwfns; 1242 u32 rc; 1243 1244 /* RoCE uses single engine and CMT uses two engines. When using both 1245 * we force only a single engine. Storage uses only engine 0 too. 1246 */ 1247 if (type == QED_SB_TYPE_L2_QUEUE) 1248 n_hwfns = cdev->num_hwfns; 1249 else 1250 n_hwfns = 1; 1251 1252 hwfn_index = sb_id % n_hwfns; 1253 p_hwfn = &cdev->hwfns[hwfn_index]; 1254 rel_sb_id = sb_id / n_hwfns; 1255 1256 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1257 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1258 hwfn_index, rel_sb_id, sb_id); 1259 1260 if (IS_PF(p_hwfn->cdev)) { 1261 p_ptt = qed_ptt_acquire(p_hwfn); 1262 if (!p_ptt) 1263 return -EBUSY; 1264 1265 rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr, 1266 sb_phy_addr, rel_sb_id); 1267 qed_ptt_release(p_hwfn, p_ptt); 1268 } else { 1269 rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr, 1270 sb_phy_addr, rel_sb_id); 1271 } 1272 1273 return rc; 1274 } 1275 1276 static u32 qed_sb_release(struct qed_dev *cdev, 1277 struct qed_sb_info *sb_info, u16 sb_id) 1278 { 1279 struct qed_hwfn *p_hwfn; 1280 int hwfn_index; 1281 u16 rel_sb_id; 1282 u32 rc; 1283 1284 hwfn_index = sb_id % cdev->num_hwfns; 1285 p_hwfn = &cdev->hwfns[hwfn_index]; 1286 rel_sb_id = sb_id / cdev->num_hwfns; 1287 1288 DP_VERBOSE(cdev, NETIF_MSG_INTR, 1289 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", 1290 hwfn_index, rel_sb_id, sb_id); 1291 1292 rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id); 1293 1294 return rc; 1295 } 1296 1297 static bool qed_can_link_change(struct qed_dev *cdev) 1298 { 1299 return true; 1300 } 1301 1302 static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params) 1303 { 1304 struct qed_hwfn *hwfn; 1305 struct qed_mcp_link_params *link_params; 1306 struct qed_ptt *ptt; 1307 int rc; 1308 1309 if (!cdev) 1310 return -ENODEV; 1311 1312 /* The link should be set only once per PF */ 1313 hwfn = &cdev->hwfns[0]; 1314 1315 /* When VF wants to set link, force it to read the bulletin instead. 1316 * This mimics the PF behavior, where a noitification [both immediate 1317 * and possible later] would be generated when changing properties. 
1318 */ 1319 if (IS_VF(cdev)) { 1320 qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG); 1321 return 0; 1322 } 1323 1324 ptt = qed_ptt_acquire(hwfn); 1325 if (!ptt) 1326 return -EBUSY; 1327 1328 link_params = qed_mcp_get_link_params(hwfn); 1329 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) 1330 link_params->speed.autoneg = params->autoneg; 1331 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) { 1332 link_params->speed.advertised_speeds = 0; 1333 if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) || 1334 (params->adv_speeds & QED_LM_1000baseT_Full_BIT)) 1335 link_params->speed.advertised_speeds |= 1336 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G; 1337 if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT) 1338 link_params->speed.advertised_speeds |= 1339 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G; 1340 if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT) 1341 link_params->speed.advertised_speeds |= 1342 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G; 1343 if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT) 1344 link_params->speed.advertised_speeds |= 1345 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G; 1346 if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT) 1347 link_params->speed.advertised_speeds |= 1348 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G; 1349 if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT) 1350 link_params->speed.advertised_speeds |= 1351 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G; 1352 if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT) 1353 link_params->speed.advertised_speeds |= 1354 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G; 1355 } 1356 if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) 1357 link_params->speed.forced_speed = params->forced_speed; 1358 if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { 1359 if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) 1360 link_params->pause.autoneg = true; 1361 else 1362 link_params->pause.autoneg = false; 1363 if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) 1364 link_params->pause.forced_rx = true; 1365 else 1366 link_params->pause.forced_rx = false; 1367 if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) 1368 link_params->pause.forced_tx = true; 1369 else 1370 link_params->pause.forced_tx = false; 1371 } 1372 if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) { 1373 switch (params->loopback_mode) { 1374 case QED_LINK_LOOPBACK_INT_PHY: 1375 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY; 1376 break; 1377 case QED_LINK_LOOPBACK_EXT_PHY: 1378 link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY; 1379 break; 1380 case QED_LINK_LOOPBACK_EXT: 1381 link_params->loopback_mode = ETH_LOOPBACK_EXT; 1382 break; 1383 case QED_LINK_LOOPBACK_MAC: 1384 link_params->loopback_mode = ETH_LOOPBACK_MAC; 1385 break; 1386 default: 1387 link_params->loopback_mode = ETH_LOOPBACK_NONE; 1388 break; 1389 } 1390 } 1391 1392 if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG) 1393 memcpy(&link_params->eee, ¶ms->eee, 1394 sizeof(link_params->eee)); 1395 1396 rc = qed_mcp_set_link(hwfn, ptt, params->link_up); 1397 1398 qed_ptt_release(hwfn, ptt); 1399 1400 return rc; 1401 } 1402 1403 static int qed_get_port_type(u32 media_type) 1404 { 1405 int port_type; 1406 1407 switch (media_type) { 1408 case MEDIA_SFPP_10G_FIBER: 1409 case MEDIA_SFP_1G_FIBER: 1410 case MEDIA_XFP_FIBER: 1411 case MEDIA_MODULE_FIBER: 1412 case MEDIA_KR: 1413 port_type = PORT_FIBRE; 1414 break; 1415 case MEDIA_DA_TWINAX: 1416 port_type = PORT_DA; 1417 break; 1418 case 
MEDIA_BASE_T: 1419 port_type = PORT_TP; 1420 break; 1421 case MEDIA_NOT_PRESENT: 1422 port_type = PORT_NONE; 1423 break; 1424 case MEDIA_UNSPECIFIED: 1425 default: 1426 port_type = PORT_OTHER; 1427 break; 1428 } 1429 return port_type; 1430 } 1431 1432 static int qed_get_link_data(struct qed_hwfn *hwfn, 1433 struct qed_mcp_link_params *params, 1434 struct qed_mcp_link_state *link, 1435 struct qed_mcp_link_capabilities *link_caps) 1436 { 1437 void *p; 1438 1439 if (!IS_PF(hwfn->cdev)) { 1440 qed_vf_get_link_params(hwfn, params); 1441 qed_vf_get_link_state(hwfn, link); 1442 qed_vf_get_link_caps(hwfn, link_caps); 1443 1444 return 0; 1445 } 1446 1447 p = qed_mcp_get_link_params(hwfn); 1448 if (!p) 1449 return -ENXIO; 1450 memcpy(params, p, sizeof(*params)); 1451 1452 p = qed_mcp_get_link_state(hwfn); 1453 if (!p) 1454 return -ENXIO; 1455 memcpy(link, p, sizeof(*link)); 1456 1457 p = qed_mcp_get_link_capabilities(hwfn); 1458 if (!p) 1459 return -ENXIO; 1460 memcpy(link_caps, p, sizeof(*link_caps)); 1461 1462 return 0; 1463 } 1464 1465 static void qed_fill_link(struct qed_hwfn *hwfn, 1466 struct qed_link_output *if_link) 1467 { 1468 struct qed_mcp_link_params params; 1469 struct qed_mcp_link_state link; 1470 struct qed_mcp_link_capabilities link_caps; 1471 u32 media_type; 1472 1473 memset(if_link, 0, sizeof(*if_link)); 1474 1475 /* Prepare source inputs */ 1476 if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { 1477 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); 1478 return; 1479 } 1480 1481 /* Set the link parameters to pass to protocol driver */ 1482 if (link.link_up) 1483 if_link->link_up = true; 1484 1485 /* TODO - at the moment assume supported and advertised speed equal */ 1486 if_link->supported_caps = QED_LM_FIBRE_BIT; 1487 if (link_caps.default_speed_autoneg) 1488 if_link->supported_caps |= QED_LM_Autoneg_BIT; 1489 if (params.pause.autoneg || 1490 (params.pause.forced_rx && params.pause.forced_tx)) 1491 if_link->supported_caps |= QED_LM_Asym_Pause_BIT; 1492 if (params.pause.autoneg || params.pause.forced_rx || 1493 params.pause.forced_tx) 1494 if_link->supported_caps |= QED_LM_Pause_BIT; 1495 1496 if_link->advertised_caps = if_link->supported_caps; 1497 if (params.speed.autoneg) 1498 if_link->advertised_caps |= QED_LM_Autoneg_BIT; 1499 else 1500 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT; 1501 if (params.speed.advertised_speeds & 1502 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1503 if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT | 1504 QED_LM_1000baseT_Full_BIT; 1505 if (params.speed.advertised_speeds & 1506 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1507 if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT; 1508 if (params.speed.advertised_speeds & 1509 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1510 if_link->advertised_caps |= QED_LM_20000baseKR2_Full_BIT; 1511 if (params.speed.advertised_speeds & 1512 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1513 if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT; 1514 if (params.speed.advertised_speeds & 1515 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1516 if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT; 1517 if (params.speed.advertised_speeds & 1518 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1519 if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT; 1520 if (params.speed.advertised_speeds & 1521 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1522 if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT; 1523 1524 if (link_caps.speed_capabilities & 1525 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 1526 if_link->supported_caps |= QED_LM_1000baseT_Half_BIT | 1527 QED_LM_1000baseT_Full_BIT; 1528 if (link_caps.speed_capabilities & 1529 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 1530 if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT; 1531 if (link_caps.speed_capabilities & 1532 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G) 1533 if_link->supported_caps |= QED_LM_20000baseKR2_Full_BIT; 1534 if (link_caps.speed_capabilities & 1535 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 1536 if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT; 1537 if (link_caps.speed_capabilities & 1538 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) 1539 if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT; 1540 if (link_caps.speed_capabilities & 1541 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 1542 if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT; 1543 if (link_caps.speed_capabilities & 1544 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 1545 if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT; 1546 1547 if (link.link_up) 1548 if_link->speed = link.speed; 1549 1550 /* TODO - fill duplex properly */ 1551 if_link->duplex = DUPLEX_FULL; 1552 qed_mcp_get_media_type(hwfn->cdev, &media_type); 1553 if_link->port = qed_get_port_type(media_type); 1554 1555 if_link->autoneg = params.speed.autoneg; 1556 1557 if (params.pause.autoneg) 1558 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; 1559 if (params.pause.forced_rx) 1560 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; 1561 if (params.pause.forced_tx) 1562 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; 1563 1564 /* Link partner capabilities */ 1565 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD) 1566 if_link->lp_caps |= QED_LM_1000baseT_Half_BIT; 1567 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD) 1568 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT; 1569 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G) 1570 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT; 1571 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G) 1572 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT; 1573 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G) 1574 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT; 1575 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G) 1576 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT; 1577 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G) 1578 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT; 1579 if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G) 1580 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT; 1581 1582 if (link.an_complete) 1583 if_link->lp_caps |= QED_LM_Autoneg_BIT; 1584 1585 if (link.partner_adv_pause) 1586 if_link->lp_caps |= QED_LM_Pause_BIT; 1587 if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE || 1588 link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE) 1589 if_link->lp_caps |= QED_LM_Asym_Pause_BIT; 1590 1591 if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) { 1592 if_link->eee_supported = false; 1593 } else { 1594 if_link->eee_supported = true; 1595 if_link->eee_active = link.eee_active; 1596 if_link->sup_caps = link_caps.eee_speed_caps; 1597 /* MFW clears adv_caps on eee disable; use configured value */ 1598 if_link->eee.adv_caps = link.eee_adv_caps ? 
link.eee_adv_caps : 1599 params.eee.adv_caps; 1600 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps; 1601 if_link->eee.enable = params.eee.enable; 1602 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable; 1603 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer; 1604 } 1605 } 1606 1607 static void qed_get_current_link(struct qed_dev *cdev, 1608 struct qed_link_output *if_link) 1609 { 1610 int i; 1611 1612 qed_fill_link(&cdev->hwfns[0], if_link); 1613 1614 for_each_hwfn(cdev, i) 1615 qed_inform_vf_link_state(&cdev->hwfns[i]); 1616 } 1617 1618 void qed_link_update(struct qed_hwfn *hwfn) 1619 { 1620 void *cookie = hwfn->cdev->ops_cookie; 1621 struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common; 1622 struct qed_link_output if_link; 1623 1624 qed_fill_link(hwfn, &if_link); 1625 qed_inform_vf_link_state(hwfn); 1626 1627 if (IS_LEAD_HWFN(hwfn) && cookie) 1628 op->link_update(cookie, &if_link); 1629 } 1630 1631 static int qed_drain(struct qed_dev *cdev) 1632 { 1633 struct qed_hwfn *hwfn; 1634 struct qed_ptt *ptt; 1635 int i, rc; 1636 1637 if (IS_VF(cdev)) 1638 return 0; 1639 1640 for_each_hwfn(cdev, i) { 1641 hwfn = &cdev->hwfns[i]; 1642 ptt = qed_ptt_acquire(hwfn); 1643 if (!ptt) { 1644 DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n"); 1645 return -EBUSY; 1646 } 1647 rc = qed_mcp_drain(hwfn, ptt); 1648 if (rc) 1649 return rc; 1650 qed_ptt_release(hwfn, ptt); 1651 } 1652 1653 return 0; 1654 } 1655 1656 static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev, 1657 struct qed_nvm_image_att *nvm_image, 1658 u32 *crc) 1659 { 1660 u8 *buf = NULL; 1661 int rc, j; 1662 u32 val; 1663 1664 /* Allocate a buffer for holding the nvram image */ 1665 buf = kzalloc(nvm_image->length, GFP_KERNEL); 1666 if (!buf) 1667 return -ENOMEM; 1668 1669 /* Read image into buffer */ 1670 rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr, 1671 buf, nvm_image->length); 1672 if (rc) { 1673 DP_ERR(cdev, "Failed reading image from nvm\n"); 1674 goto out; 1675 } 1676 1677 /* Convert the buffer into big-endian format (excluding the 1678 * closing 4 bytes of CRC). 1679 */ 1680 for (j = 0; j < nvm_image->length - 4; j += 4) { 1681 val = cpu_to_be32(*(u32 *)&buf[j]); 1682 *(u32 *)&buf[j] = val; 1683 } 1684 1685 /* Calc CRC for the "actual" image buffer, i.e. not including 1686 * the last 4 CRC bytes. 1687 */ 1688 *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4))); 1689 1690 out: 1691 kfree(buf); 1692 1693 return rc; 1694 } 1695 1696 /* Binary file format - 1697 * /----------------------------------------------------------------------\ 1698 * 0B | 0x4 [command index] | 1699 * 4B | image_type | Options | Number of register settings | 1700 * 8B | Value | 1701 * 12B | Mask | 1702 * 16B | Offset | 1703 * \----------------------------------------------------------------------/ 1704 * There can be several Value-Mask-Offset sets as specified by 'Number of...'. 
1705 * Options - 0'b - Calculate & Update CRC for image 1706 */ 1707 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data, 1708 bool *check_resp) 1709 { 1710 struct qed_nvm_image_att nvm_image; 1711 struct qed_hwfn *p_hwfn; 1712 bool is_crc = false; 1713 u32 image_type; 1714 int rc = 0, i; 1715 u16 len; 1716 1717 *data += 4; 1718 image_type = **data; 1719 p_hwfn = QED_LEADING_HWFN(cdev); 1720 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 1721 if (image_type == p_hwfn->nvm_info.image_att[i].image_type) 1722 break; 1723 if (i == p_hwfn->nvm_info.num_images) { 1724 DP_ERR(cdev, "Failed to find nvram image of type %08x\n", 1725 image_type); 1726 return -ENOENT; 1727 } 1728 1729 nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr; 1730 nvm_image.length = p_hwfn->nvm_info.image_att[i].len; 1731 1732 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1733 "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n", 1734 **data, image_type, nvm_image.start_addr, 1735 nvm_image.start_addr + nvm_image.length - 1); 1736 (*data)++; 1737 is_crc = !!(**data & BIT(0)); 1738 (*data)++; 1739 len = *((u16 *)*data); 1740 *data += 2; 1741 if (is_crc) { 1742 u32 crc = 0; 1743 1744 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc); 1745 if (rc) { 1746 DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc); 1747 goto exit; 1748 } 1749 1750 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 1751 (nvm_image.start_addr + 1752 nvm_image.length - 4), (u8 *)&crc, 4); 1753 if (rc) 1754 DP_ERR(cdev, "Failed writing to %08x, rc = %d\n", 1755 nvm_image.start_addr + nvm_image.length - 4, rc); 1756 goto exit; 1757 } 1758 1759 /* Iterate over the values for setting */ 1760 while (len) { 1761 u32 offset, mask, value, cur_value; 1762 u8 buf[4]; 1763 1764 value = *((u32 *)*data); 1765 *data += 4; 1766 mask = *((u32 *)*data); 1767 *data += 4; 1768 offset = *((u32 *)*data); 1769 *data += 4; 1770 1771 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf, 1772 4); 1773 if (rc) { 1774 DP_ERR(cdev, "Failed reading from %08x\n", 1775 nvm_image.start_addr + offset); 1776 goto exit; 1777 } 1778 1779 cur_value = le32_to_cpu(*((__le32 *)buf)); 1780 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1781 "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n", 1782 nvm_image.start_addr + offset, cur_value, 1783 (cur_value & ~mask) | (value & mask), value, mask); 1784 value = (value & mask) | (cur_value & ~mask); 1785 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM, 1786 nvm_image.start_addr + offset, 1787 (u8 *)&value, 4); 1788 if (rc) { 1789 DP_ERR(cdev, "Failed writing to %08x\n", 1790 nvm_image.start_addr + offset); 1791 goto exit; 1792 } 1793 1794 len--; 1795 } 1796 exit: 1797 return rc; 1798 } 1799 1800 /* Binary file format - 1801 * /----------------------------------------------------------------------\ 1802 * 0B | 0x3 [command index] | 1803 * 4B | b'0: check_response? 
| b'1-31 reserved | 1804 * 8B | File-type | reserved | 1805 * \----------------------------------------------------------------------/ 1806 * Start a new file of the provided type 1807 */ 1808 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, 1809 const u8 **data, bool *check_resp) 1810 { 1811 int rc; 1812 1813 *data += 4; 1814 *check_resp = !!(**data & BIT(0)); 1815 *data += 4; 1816 1817 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1818 "About to start a new file of type %02x\n", **data); 1819 rc = qed_mcp_nvm_put_file_begin(cdev, **data); 1820 *data += 4; 1821 1822 return rc; 1823 } 1824 1825 /* Binary file format - 1826 * /----------------------------------------------------------------------\ 1827 * 0B | 0x2 [command index] | 1828 * 4B | Length in bytes | 1829 * 8B | b'0: check_response? | b'1-31 reserved | 1830 * 12B | Offset in bytes | 1831 * 16B | Data ... | 1832 * \----------------------------------------------------------------------/ 1833 * Write data as part of a file that was previously started. Data should be 1834 * of length equal to that provided in the message 1835 */ 1836 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev, 1837 const u8 **data, bool *check_resp) 1838 { 1839 u32 offset, len; 1840 int rc; 1841 1842 *data += 4; 1843 len = *((u32 *)(*data)); 1844 *data += 4; 1845 *check_resp = !!(**data & BIT(0)); 1846 *data += 4; 1847 offset = *((u32 *)(*data)); 1848 *data += 4; 1849 1850 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1851 "About to write File-data: %08x bytes to offset %08x\n", 1852 len, offset); 1853 1854 rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset, 1855 (char *)(*data), len); 1856 *data += len; 1857 1858 return rc; 1859 } 1860 1861 /* Binary file format [General header] - 1862 * /----------------------------------------------------------------------\ 1863 * 0B | QED_NVM_SIGNATURE | 1864 * 4B | Length in bytes | 1865 * 8B | Highest command in this batchfile | Reserved | 1866 * \----------------------------------------------------------------------/ 1867 */ 1868 static int qed_nvm_flash_image_validate(struct qed_dev *cdev, 1869 const struct firmware *image, 1870 const u8 **data) 1871 { 1872 u32 signature, len; 1873 1874 /* Check minimum size */ 1875 if (image->size < 12) { 1876 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size); 1877 return -EINVAL; 1878 } 1879 1880 /* Check signature */ 1881 signature = *((u32 *)(*data)); 1882 if (signature != QED_NVM_SIGNATURE) { 1883 DP_ERR(cdev, "Wrong signature '%08x'\n", signature); 1884 return -EINVAL; 1885 } 1886 1887 *data += 4; 1888 /* Validate internal size equals the image-size */ 1889 len = *((u32 *)(*data)); 1890 if (len != image->size) { 1891 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n", 1892 len, (u32)image->size); 1893 return -EINVAL; 1894 } 1895 1896 *data += 4; 1897 /* Make sure driver familiar with all commands necessary for this */ 1898 if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) { 1899 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n", 1900 *((u16 *)(*data))); 1901 return -EINVAL; 1902 } 1903 1904 *data += 4; 1905 1906 return 0; 1907 } 1908 1909 static int qed_nvm_flash(struct qed_dev *cdev, const char *name) 1910 { 1911 const struct firmware *image; 1912 const u8 *data, *data_end; 1913 u32 cmd_type; 1914 int rc; 1915 1916 rc = request_firmware(&image, name, &cdev->pdev->dev); 1917 if (rc) { 1918 DP_ERR(cdev, "Failed to find '%s'\n", name); 1919 return rc; 1920 } 1921 1922 DP_VERBOSE(cdev, NETIF_MSG_DRV, 1923 "Flashing '%s' - firmware's data at 
%p, size is %08x\n", 1924 name, image->data, (u32)image->size); 1925 data = image->data; 1926 data_end = data + image->size; 1927 1928 rc = qed_nvm_flash_image_validate(cdev, image, &data); 1929 if (rc) 1930 goto exit; 1931 1932 while (data < data_end) { 1933 bool check_resp = false; 1934 1935 /* Parse the actual command */ 1936 cmd_type = *((u32 *)data); 1937 switch (cmd_type) { 1938 case QED_NVM_FLASH_CMD_FILE_DATA: 1939 rc = qed_nvm_flash_image_file_data(cdev, &data, 1940 &check_resp); 1941 break; 1942 case QED_NVM_FLASH_CMD_FILE_START: 1943 rc = qed_nvm_flash_image_file_start(cdev, &data, 1944 &check_resp); 1945 break; 1946 case QED_NVM_FLASH_CMD_NVM_CHANGE: 1947 rc = qed_nvm_flash_image_access(cdev, &data, 1948 &check_resp); 1949 break; 1950 default: 1951 DP_ERR(cdev, "Unknown command %08x\n", cmd_type); 1952 rc = -EINVAL; 1953 goto exit; 1954 } 1955 1956 if (rc) { 1957 DP_ERR(cdev, "Command %08x failed\n", cmd_type); 1958 goto exit; 1959 } 1960 1961 /* Check response if needed */ 1962 if (check_resp) { 1963 u32 mcp_response = 0; 1964 1965 if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) { 1966 DP_ERR(cdev, "Failed getting MCP response\n"); 1967 rc = -EINVAL; 1968 goto exit; 1969 } 1970 1971 switch (mcp_response & FW_MSG_CODE_MASK) { 1972 case FW_MSG_CODE_OK: 1973 case FW_MSG_CODE_NVM_OK: 1974 case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK: 1975 case FW_MSG_CODE_PHY_OK: 1976 break; 1977 default: 1978 DP_ERR(cdev, "MFW returns error: %08x\n", 1979 mcp_response); 1980 rc = -EINVAL; 1981 goto exit; 1982 } 1983 } 1984 } 1985 1986 exit: 1987 release_firmware(image); 1988 1989 return rc; 1990 } 1991 1992 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type, 1993 u8 *buf, u16 len) 1994 { 1995 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 1996 1997 return qed_mcp_get_nvm_image(hwfn, type, buf, len); 1998 } 1999 2000 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, 2001 void *handle) 2002 { 2003 return qed_set_queue_coalesce(rx_coal, tx_coal, handle); 2004 } 2005 2006 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode) 2007 { 2008 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2009 struct qed_ptt *ptt; 2010 int status = 0; 2011 2012 ptt = qed_ptt_acquire(hwfn); 2013 if (!ptt) 2014 return -EAGAIN; 2015 2016 status = qed_mcp_set_led(hwfn, ptt, mode); 2017 2018 qed_ptt_release(hwfn, ptt); 2019 2020 return status; 2021 } 2022 2023 static int qed_update_wol(struct qed_dev *cdev, bool enabled) 2024 { 2025 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2026 struct qed_ptt *ptt; 2027 int rc = 0; 2028 2029 if (IS_VF(cdev)) 2030 return 0; 2031 2032 ptt = qed_ptt_acquire(hwfn); 2033 if (!ptt) 2034 return -EAGAIN; 2035 2036 rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED 2037 : QED_OV_WOL_DISABLED); 2038 if (rc) 2039 goto out; 2040 rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV); 2041 2042 out: 2043 qed_ptt_release(hwfn, ptt); 2044 return rc; 2045 } 2046 2047 static int qed_update_drv_state(struct qed_dev *cdev, bool active) 2048 { 2049 struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev); 2050 struct qed_ptt *ptt; 2051 int status = 0; 2052 2053 if (IS_VF(cdev)) 2054 return 0; 2055 2056 ptt = qed_ptt_acquire(hwfn); 2057 if (!ptt) 2058 return -EAGAIN; 2059 2060 status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ? 
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.read_module_eeprom = &qed_read_module_eeprom,
};
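
/* qed_common_ops_pass is handed to the protocol drivers (qede, qedf, qedi)
 * as the ->common member of their respective ops structures. A hedged sketch
 * of how an upper driver reaches one of these callbacks - the local variable
 * names are illustrative only, not taken from the upper drivers:
 *
 *	struct qed_dev *cdev;          obtained from ->probe()
 *
 *	edev->ops->common->set_led(cdev, QED_LED_MODE_ON);
 *	edev->ops->common->update_mtu(cdev, ndev->mtu);
 */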
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
			eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
			eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}
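
/* qed_fill_generic_tlv_data() above relies on the protocol driver's
 * ->get_generic_tlv_data() callback to report offload capabilities and the
 * MAC addresses in use. A hedged sketch of such a callback - the netdev
 * feature checks are illustrative, the real upper-driver implementation may
 * differ:
 *
 *	static void example_get_generic_tlv_data(void *dev,
 *						 struct qed_generic_tlvs *data)
 *	{
 *		struct net_device *ndev = dev;   cookie handed to qed at probe
 *
 *		if (ndev->features & NETIF_F_IP_CSUM)
 *			data->feat_flags |= QED_TLV_IP_CSUM;
 *		if (ndev->features & NETIF_F_TSO)
 *			data->feat_flags |= QED_TLV_LSO;
 *		ether_addr_copy(data->mac[0], ndev->dev_addr);
 *	}
 */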