/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>

#include "qed.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_notice("qed_init called\n");

	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
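/* For reference: the split dma_set_mask()/dma_set_coherent_mask() sequence
 * above is roughly what the combined helper would express in one call, e.g.
 * (illustrative sketch only):
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
 *		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *
 * Keeping the calls separate lets a failed 64-bit coherent mask be
 * reported distinctly from a failed streaming mask.
 */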
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev,
			struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (cdev->pci_params.pm_cap == 0)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
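/* BAR usage above (for orientation; derived from the mappings themselves):
 * BAR 0 holds the register window and is mapped uncached via
 * pci_ioremap_bar(), while BAR 2 holds the doorbell space and is mapped
 * via ioremap_wc() so that doorbell writes can be write-combined by the
 * CPU.
 */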
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	dev_info->fw_major = FW_MAJOR_VERSION;
	dev_info->fw_minor = FW_MINOR_VERSION;
	dev_info->fw_rev = FW_REVISION_VERSION;
	dev_info->fw_eng = FW_ENGINEERING_VERSION;
	dev_info->mf_mode = cdev->mf_mode;

	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);

	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (ptt) {
		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
				       &dev_info->flash_size);

		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
	}

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev,
			       pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 enum qed_protocol protocol,
				 u32 dp_module,
				 u8 dp_level)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = protocol;

	qed_init_dp(cdev, dp_module, dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev,
					   int_params->msix_table, cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
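/* Worked example for the rounding above (illustrative numbers only): with
 * num_hwfns == 2, num_vectors == 16 and min_msix_cnt == 4, a partial grant
 * of rc == 9 is not a multiple of 2, so the request is retried with
 * cnt = (9 / 2) * 2 == 8 vectors via pci_enable_msix_exact().
 */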
/* This function outputs the int mode and the number of enabled MSI-X
 * vectors.
 */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		rc = pci_enable_msi(cdev->pdev);
		if (!rc) {
			int_params->out.int_mode = QED_INT_MODE_MSI;
			goto out;
		}

		DP_NOTICE(cdev, "Failed to enable MSI\n");
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
						hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
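/* Status-word layout decoded above: bit 0 is the slowpath indication and
 * bit (j + 1) belongs to fastpath handler j, hence the 0x2ULL << j test.
 * E.g. (illustrative) status == 0x9 means "slowpath, plus fastpath
 * handler 2".
 */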
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	int rc = 0;
	u8 id;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
		if (!rc)
			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
				   "Requested slowpath MSI-X\n");
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc;

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
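/* The num_hwfns * 63 limit above appears to follow from the single-IRQ
 * status word decoded in qed_single_int(): each hwfn exposes a 64-bit
 * indication register whose bit 0 is reserved for slowpath, leaving 63
 * possible fastpath indications per hwfn.
 */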
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int rc;
	int i;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));

	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	return 0;
}
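/* Worked example for the accounting above (illustrative numbers only):
 * a CMT device with num_hwfns == 2 and 8 status blocks per hwfn requests
 * 2 * 8 + 2 == 18 vectors, with min_msix_cnt == 4. The first two MSI-X
 * table entries serve the slowpath DPCs, so fp_msix_base == 2 and, on a
 * full grant, fp_msix_cnt == 16.
 */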
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	int rc;

	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
			      &cdev->pdev->dev);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to find fw file - /lib/firmware/%s\n",
			  QED_FW_FILE_NAME);
		goto err;
	}

	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	rc = qed_slowpath_setup_int(cdev, params->int_mode);
	if (rc)
		goto err1;

	/* Allocate stream for unzipping */
	rc = qed_alloc_stream_mem(cdev);
	if (rc) {
		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
		goto err2;
	}

	/* Start the slowpath */
	data = cdev->firmware->data;

	rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	hwfn = QED_LEADING_HWFN(cdev);
	drv_version.version = (params->drv_major << 24) |
			      (params->drv_minor << 16) |
			      (params->drv_rev << 8) |
			      (params->drv_eng);
	strlcpy(drv_version.name, params->name,
		MCP_DRV_VER_STR_SIZE - 4);
	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
				      &drv_version);
	if (rc) {
		DP_NOTICE(cdev, "Failed sending drv version command\n");
		return rc;
	}

	qed_reset_vport_stats(cdev);

	return 0;

err2:
	qed_hw_timers_stop_all(cdev);
	qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	release_firmware(cdev->firmware);

	return rc;
}
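/* The version dword sent to the MFW above packs "major.minor.rev.eng"
 * one byte per field, most significant first; e.g. (illustrative) a
 * driver version of 8.4.2.0 becomes 0x08040200.
 */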
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_free_stream_mem(cdev);

	qed_nic_stop(cdev);
	qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}
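/* CMT interleaving used by qed_sb_init()/qed_sb_release() above
 * (illustrative): with num_hwfns == 2, L2 status blocks 0, 2, 4, ... map
 * to engine 0 as relative ids 0, 1, 2, ..., while 1, 3, 5, ... land on
 * engine 1. Non-L2 (storage/RoCE) status blocks always use engine 0
 * (n_hwfns == 1).
 */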
static int qed_set_link(struct qed_dev *cdev,
			struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
		    (params->adv_speeds & SUPPORTED_1000baseT_Full))
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
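/* Note on the "& 0" tests above and the "|= 0" statements in
 * qed_fill_link() below: these look like bugs but are presumably
 * placeholders; ethtool had no SUPPORTED_* bits for 50G/100G link modes
 * at the time, so the conditions intentionally compile to no-ops until
 * such bits exist.
 */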
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
	       sizeof(link_caps));

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = SUPPORTED_FIBRE;
	if (params.speed.autoneg)
		if_link->supported_caps |= SUPPORTED_Autoneg;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= SUPPORTED_Asym_Pause;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= SUPPORTED_Pause;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
					    SUPPORTED_1000baseT_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= 0;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->advertised_caps |= 0;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= SUPPORTED_1000baseT_Half |
					   SUPPORTED_1000baseT_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= 0;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->supported_caps |= 0;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Half;
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Full;
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= 0;
	if (link.partner_adv_speed &
	    QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= 0;

	if (link.an_complete)
		if_link->lp_caps |= SUPPORTED_Autoneg;

	if (link.partner_adv_pause)
		if_link->lp_caps |= SUPPORTED_Pause;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= SUPPORTED_Asym_Pause;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	qed_fill_link(&cdev->hwfns[0], if_link);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

const struct qed_common_ops qed_common_ops_pass = {
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.set_led = &qed_set_led,
};

u32 qed_get_protocol_version(enum qed_protocol protocol)
{
	switch (protocol) {
	case QED_PROTOCOL_ETH:
		return QED_ETH_INTERFACE_VERSION;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(qed_get_protocol_version);