1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 29 /* 30 * File: qlnx_os.c 31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "qlnx_os.h" 38 #include "bcm_osal.h" 39 #include "reg_addr.h" 40 #include "ecore_gtt_reg_addr.h" 41 #include "ecore.h" 42 #include "ecore_chain.h" 43 #include "ecore_status.h" 44 #include "ecore_hw.h" 45 #include "ecore_rt_defs.h" 46 #include "ecore_init_ops.h" 47 #include "ecore_int.h" 48 #include "ecore_cxt.h" 49 #include "ecore_spq.h" 50 #include "ecore_init_fw_funcs.h" 51 #include "ecore_sp_commands.h" 52 #include "ecore_dev_api.h" 53 #include "ecore_l2_api.h" 54 #include "ecore_mcp.h" 55 #include "ecore_hw_defs.h" 56 #include "mcp_public.h" 57 #include "ecore_iro.h" 58 #include "nvm_cfg.h" 59 #include "ecore_dev_api.h" 60 #include "ecore_dbg_fw_funcs.h" 61 62 #include "qlnx_ioctl.h" 63 #include "qlnx_def.h" 64 #include "qlnx_ver.h" 65 #include <sys/smp.h> 66 67 68 /* 69 * static functions 70 */ 71 /* 72 * ioctl related functions 73 */ 74 static void qlnx_add_sysctls(qlnx_host_t *ha); 75 76 /* 77 * main driver 78 */ 79 static void qlnx_release(qlnx_host_t *ha); 80 static void qlnx_fp_isr(void *arg); 81 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 82 static void qlnx_init(void *arg); 83 static void qlnx_init_locked(qlnx_host_t *ha); 84 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 85 static int qlnx_set_promisc(qlnx_host_t *ha); 86 static int qlnx_set_allmulti(qlnx_host_t *ha); 87 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 88 static int qlnx_media_change(struct ifnet *ifp); 89 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 90 static void qlnx_stop(qlnx_host_t *ha); 91 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 92 struct mbuf **m_headp); 93 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 94 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 95 struct qlnx_link_output *if_link); 96 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp); 97 static void qlnx_qflush(struct ifnet *ifp); 98 99 static int 
qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 100 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 101 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 102 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 103 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 104 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 105 106 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 107 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 108 109 static int qlnx_nic_setup(struct ecore_dev *cdev, 110 struct ecore_pf_params *func_params); 111 static int qlnx_nic_start(struct ecore_dev *cdev); 112 static int qlnx_slowpath_start(qlnx_host_t *ha); 113 static int qlnx_slowpath_stop(qlnx_host_t *ha); 114 static int qlnx_init_hw(qlnx_host_t *ha); 115 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 116 char ver_str[VER_SIZE]); 117 static void qlnx_unload(qlnx_host_t *ha); 118 static int qlnx_load(qlnx_host_t *ha); 119 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 120 uint32_t add_mac); 121 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 122 uint32_t len); 123 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 124 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 125 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 126 struct qlnx_rx_queue *rxq); 127 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 128 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 129 int hwfn_index); 130 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 131 int hwfn_index); 132 static void qlnx_timer(void *arg); 133 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 134 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 135 static void qlnx_trigger_dump(qlnx_host_t *ha); 136 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 137 struct qlnx_tx_queue *txq); 138 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 139 int lro_enable); 140 static void qlnx_fp_taskqueue(void *context, int pending); 141 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 142 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 143 struct qlnx_agg_info *tpa); 144 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 145 146 #if __FreeBSD_version >= 1100000 147 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 148 #endif 149 150 151 /* 152 * Hooks to the Operating Systems 153 */ 154 static int qlnx_pci_probe (device_t); 155 static int qlnx_pci_attach (device_t); 156 static int qlnx_pci_detach (device_t); 157 158 static device_method_t qlnx_pci_methods[] = { 159 /* Device interface */ 160 DEVMETHOD(device_probe, qlnx_pci_probe), 161 DEVMETHOD(device_attach, qlnx_pci_attach), 162 DEVMETHOD(device_detach, qlnx_pci_detach), 163 { 0, 0 } 164 }; 165 166 static driver_t qlnx_pci_driver = { 167 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 168 }; 169 170 static devclass_t qlnx_devclass; 171 172 MODULE_VERSION(if_qlnxe,1); 173 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0); 174 175 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 176 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 177 178 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 179 180 181 char qlnx_dev_str[64]; 182 char qlnx_ver_str[VER_SIZE]; 183 char qlnx_name_str[NAME_SIZE]; 184 185 /* 186 * Some PCI Configuration Space Related Defines 
187 */ 188 189 #ifndef PCI_VENDOR_QLOGIC 190 #define PCI_VENDOR_QLOGIC 0x1077 191 #endif 192 193 /* 40G Adapter QLE45xxx*/ 194 #ifndef QLOGIC_PCI_DEVICE_ID_1634 195 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 196 #endif 197 198 /* 100G Adapter QLE45xxx*/ 199 #ifndef QLOGIC_PCI_DEVICE_ID_1644 200 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 201 #endif 202 203 /* 25G Adapter QLE45xxx*/ 204 #ifndef QLOGIC_PCI_DEVICE_ID_1656 205 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 206 #endif 207 208 /* 50G Adapter QLE45xxx*/ 209 #ifndef QLOGIC_PCI_DEVICE_ID_1654 210 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 211 #endif 212 213 static int 214 qlnx_valid_device(device_t dev) 215 { 216 uint16_t device_id; 217 218 device_id = pci_get_device(dev); 219 220 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 221 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 222 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 223 (device_id == QLOGIC_PCI_DEVICE_ID_1654)) 224 return 0; 225 226 return -1; 227 } 228 229 /* 230 * Name: qlnx_pci_probe 231 * Function: Validate the PCI device to be a QLE45xxx series device 232 */ 233 static int 234 qlnx_pci_probe(device_t dev) 235 { 236 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 237 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 238 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 239 240 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 241 return (ENXIO); 242 } 243 244 switch (pci_get_device(dev)) { 245 246 case QLOGIC_PCI_DEVICE_ID_1644: 247 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 248 "QLogic 100GbE PCI CNA Adapter-Ethernet Function", 249 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 250 QLNX_VERSION_BUILD); 251 device_set_desc_copy(dev, qlnx_dev_str); 252 253 break; 254 255 case QLOGIC_PCI_DEVICE_ID_1634: 256 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 257 "QLogic 40GbE PCI CNA Adapter-Ethernet Function", 258 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 259 QLNX_VERSION_BUILD); 260 device_set_desc_copy(dev, qlnx_dev_str); 261 262 break; 263 264 case QLOGIC_PCI_DEVICE_ID_1656: 265 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 266 "QLogic 25GbE PCI CNA Adapter-Ethernet Function", 267 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 268 QLNX_VERSION_BUILD); 269 device_set_desc_copy(dev, qlnx_dev_str); 270 271 break; 272 273 case QLOGIC_PCI_DEVICE_ID_1654: 274 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 275 "QLogic 50GbE PCI CNA Adapter-Ethernet Function", 276 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 277 QLNX_VERSION_BUILD); 278 device_set_desc_copy(dev, qlnx_dev_str); 279 280 break; 281 282 default: 283 return (ENXIO); 284 } 285 286 return (BUS_PROBE_DEFAULT); 287 } 288 289 290 static void 291 qlnx_sp_intr(void *arg) 292 { 293 struct ecore_hwfn *p_hwfn; 294 qlnx_host_t *ha; 295 int i; 296 297 p_hwfn = arg; 298 299 if (p_hwfn == NULL) { 300 printf("%s: spurious slowpath intr\n", __func__); 301 return; 302 } 303 304 ha = (qlnx_host_t *)p_hwfn->p_dev; 305 306 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 307 308 for (i = 0; i < ha->cdev.num_hwfns; i++) { 309 if (&ha->cdev.hwfns[i] == p_hwfn) { 310 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 311 break; 312 } 313 } 314 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 315 316 return; 317 } 318 319 static void 320 qlnx_sp_taskqueue(void *context, int pending) 321 { 322 struct ecore_hwfn *p_hwfn; 323 324 p_hwfn = context; 325 326 if (p_hwfn != NULL) { 327 qlnx_sp_isr(p_hwfn); 328 } 329 return; 330 } 331 332 static int 333 qlnx_create_sp_taskqueues(qlnx_host_t
*ha) 334 { 335 int i; 336 char tq_name[32]; 337 338 for (i = 0; i < ha->cdev.num_hwfns; i++) { 339 340 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 341 342 bzero(tq_name, sizeof (tq_name)); 343 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 344 345 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 346 347 ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT, 348 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 349 350 if (ha->sp_taskqueue[i] == NULL) 351 return (-1); 352 353 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 354 tq_name); 355 356 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 357 ha->sp_taskqueue[i])); 358 } 359 360 return (0); 361 } 362 363 static void 364 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 365 { 366 int i; 367 368 for (i = 0; i < ha->cdev.num_hwfns; i++) { 369 if (ha->sp_taskqueue[i] != NULL) { 370 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 371 taskqueue_free(ha->sp_taskqueue[i]); 372 } 373 } 374 return; 375 } 376 377 static void 378 qlnx_fp_taskqueue(void *context, int pending) 379 { 380 struct qlnx_fastpath *fp; 381 qlnx_host_t *ha; 382 struct ifnet *ifp; 383 struct mbuf *mp; 384 int ret = 0; 385 386 fp = context; 387 388 if (fp == NULL) 389 return; 390 391 ha = (qlnx_host_t *)fp->edev; 392 393 ifp = ha->ifp; 394 395 mtx_lock(&fp->tx_mtx); 396 397 if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != 398 IFF_DRV_RUNNING) || (!ha->link_up)) { 399 400 mtx_unlock(&fp->tx_mtx); 401 goto qlnx_fp_taskqueue_exit; 402 } 403 404 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 405 406 mp = drbr_peek(ifp, fp->tx_br); 407 408 while (mp != NULL) { 409 410 ret = qlnx_send(ha, fp, &mp); 411 412 if (ret) { 413 414 if (mp != NULL) { 415 drbr_putback(ifp, fp->tx_br, mp); 416 } else { 417 fp->tx_pkts_processed++; 418 drbr_advance(ifp, fp->tx_br); 419 } 420 421 mtx_unlock(&fp->tx_mtx); 422 423 goto qlnx_fp_taskqueue_exit; 424 425 } else { 426 drbr_advance(ifp, fp->tx_br); 427 fp->tx_pkts_transmitted++; 428 fp->tx_pkts_processed++; 429 } 430 431 mp = drbr_peek(ifp, fp->tx_br); 432 } 433 434 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 435 436 mtx_unlock(&fp->tx_mtx); 437 438 qlnx_fp_taskqueue_exit: 439 440 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 441 return; 442 } 443 444 static int 445 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 446 { 447 int i; 448 char tq_name[32]; 449 struct qlnx_fastpath *fp; 450 451 for (i = 0; i < ha->num_rss; i++) { 452 453 fp = &ha->fp_array[i]; 454 455 bzero(tq_name, sizeof (tq_name)); 456 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 457 458 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 459 460 fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT, 461 taskqueue_thread_enqueue, 462 &fp->fp_taskqueue); 463 464 if (fp->fp_taskqueue == NULL) 465 return (-1); 466 467 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 468 tq_name); 469 470 QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__, 471 fp->fp_taskqueue)); 472 } 473 474 return (0); 475 } 476 477 static void 478 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 479 { 480 int i; 481 struct qlnx_fastpath *fp; 482 483 for (i = 0; i < ha->num_rss; i++) { 484 485 fp = &ha->fp_array[i]; 486 487 if (fp->fp_taskqueue != NULL) { 488 489 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 490 taskqueue_free(fp->fp_taskqueue); 491 fp->fp_taskqueue = NULL; 492 } 493 } 494 return; 495 } 496 497 static void 498 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 499 { 500 int i; 501 struct qlnx_fastpath *fp; 502 503 for (i = 
0; i < ha->num_rss; i++) { 504 fp = &ha->fp_array[i]; 505 506 if (fp->fp_taskqueue != NULL) { 507 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 508 } 509 } 510 return; 511 } 512 513 /* 514 * Name: qlnx_pci_attach 515 * Function: attaches the device to the operating system 516 */ 517 static int 518 qlnx_pci_attach(device_t dev) 519 { 520 qlnx_host_t *ha = NULL; 521 uint32_t rsrc_len_reg = 0; 522 uint32_t rsrc_len_dbells = 0; 523 uint32_t rsrc_len_msix = 0; 524 int i; 525 uint32_t mfw_ver; 526 527 if ((ha = device_get_softc(dev)) == NULL) { 528 device_printf(dev, "cannot get softc\n"); 529 return (ENOMEM); 530 } 531 532 memset(ha, 0, sizeof (qlnx_host_t)); 533 534 if (qlnx_valid_device(dev) != 0) { 535 device_printf(dev, "device is not a valid device\n"); 536 return (ENXIO); 537 } 538 ha->pci_func = pci_get_function(dev); 539 540 ha->pci_dev = dev; 541 542 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 543 mtx_init(&ha->tx_lock, "qlnx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); 544 545 ha->flags.lock_init = 1; 546 547 pci_enable_busmaster(dev); 548 549 /* 550 * map the PCI BARs 551 */ 552 553 ha->reg_rid = PCIR_BAR(0); 554 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 555 RF_ACTIVE); 556 557 if (ha->pci_reg == NULL) { 558 device_printf(dev, "unable to map BAR0\n"); 559 goto qlnx_pci_attach_err; 560 } 561 562 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 563 ha->reg_rid); 564 565 ha->dbells_rid = PCIR_BAR(2); 566 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 567 &ha->dbells_rid, RF_ACTIVE); 568 569 if (ha->pci_dbells == NULL) { 570 device_printf(dev, "unable to map BAR1\n"); 571 goto qlnx_pci_attach_err; 572 } 573 574 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 575 ha->dbells_rid); 576 577 ha->dbells_phys_addr = (uint64_t) 578 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 579 ha->dbells_size = rsrc_len_dbells; 580 581 ha->msix_rid = PCIR_BAR(4); 582 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 583 &ha->msix_rid, RF_ACTIVE); 584 585 if (ha->msix_bar == NULL) { 586 device_printf(dev, "unable to map BAR2\n"); 587 goto qlnx_pci_attach_err; 588 } 589 590 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 591 ha->msix_rid); 592 /* 593 * allocate dma tags 594 */ 595 596 if (qlnx_alloc_parent_dma_tag(ha)) 597 goto qlnx_pci_attach_err; 598 599 if (qlnx_alloc_tx_dma_tag(ha)) 600 goto qlnx_pci_attach_err; 601 602 if (qlnx_alloc_rx_dma_tag(ha)) 603 goto qlnx_pci_attach_err; 604 605 606 if (qlnx_init_hw(ha) != 0) 607 goto qlnx_pci_attach_err; 608 609 /* 610 * Allocate MSI-x vectors 611 */ 612 ha->num_rss = QLNX_MAX_RSS; 613 ha->num_tc = QLNX_MAX_TC; 614 615 ha->msix_count = pci_msix_count(dev); 616 617 if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns)) 618 ha->msix_count = mp_ncpus + ha->cdev.num_hwfns; 619 620 if (!ha->msix_count || 621 (ha->msix_count < (ha->cdev.num_hwfns + 1 ))) { 622 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 623 ha->msix_count); 624 goto qlnx_pci_attach_err; 625 } 626 627 if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns )) 628 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns; 629 else 630 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns; 631 632 QL_DPRINT1(ha, (dev, "%s:\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]" 633 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]" 634 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]" 635 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 636 __func__, ha->pci_reg, rsrc_len_reg, 
637 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 638 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 639 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc)); 640 641 if (pci_alloc_msix(dev, &ha->msix_count)) { 642 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 643 ha->msix_count); 644 ha->msix_count = 0; 645 goto qlnx_pci_attach_err; 646 } 647 648 /* 649 * Initialize slow path interrupt and task queue 650 */ 651 if (qlnx_create_sp_taskqueues(ha) != 0) 652 goto qlnx_pci_attach_err; 653 654 for (i = 0; i < ha->cdev.num_hwfns; i++) { 655 656 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 657 658 ha->sp_irq_rid[i] = i + 1; 659 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 660 &ha->sp_irq_rid[i], 661 (RF_ACTIVE | RF_SHAREABLE)); 662 if (ha->sp_irq[i] == NULL) { 663 device_printf(dev, 664 "could not allocate mbx interrupt\n"); 665 goto qlnx_pci_attach_err; 666 } 667 668 if (bus_setup_intr(dev, ha->sp_irq[i], 669 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 670 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 671 device_printf(dev, 672 "could not setup slow path interrupt\n"); 673 goto qlnx_pci_attach_err; 674 } 675 676 QL_DPRINT1(ha, (dev, "%s: p_hwfn [%p] sp_irq_rid %d" 677 " sp_irq %p sp_handle %p\n", __func__, p_hwfn, 678 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i])); 679 680 } 681 682 /* 683 * initialize fast path interrupt 684 */ 685 if (qlnx_create_fp_taskqueues(ha) != 0) 686 goto qlnx_pci_attach_err; 687 688 for (i = 0; i < ha->num_rss; i++) { 689 ha->irq_vec[i].rss_idx = i; 690 ha->irq_vec[i].ha = ha; 691 ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i; 692 693 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 694 &ha->irq_vec[i].irq_rid, 695 (RF_ACTIVE | RF_SHAREABLE)); 696 697 if (ha->irq_vec[i].irq == NULL) { 698 device_printf(dev, 699 "could not allocate interrupt[%d]\n", i); 700 goto qlnx_pci_attach_err; 701 } 702 703 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 704 device_printf(dev, "could not allocate tx_br[%d]\n", i); 705 goto qlnx_pci_attach_err; 706 707 } 708 } 709 710 callout_init(&ha->qlnx_callout, 1); 711 ha->flags.callout_init = 1; 712 713 for (i = 0; i < ha->cdev.num_hwfns; i++) { 714 715 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 716 goto qlnx_pci_attach_err; 717 if (ha->grcdump_size[i] == 0) 718 goto qlnx_pci_attach_err; 719 720 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 721 QL_DPRINT1(ha, (dev, "grcdump_size[%d] = 0x%08x\n", 722 i, ha->grcdump_size[i])); 723 724 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 725 if (ha->grcdump[i] == NULL) { 726 device_printf(dev, "grcdump alloc[%d] failed\n", i); 727 goto qlnx_pci_attach_err; 728 } 729 730 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 731 goto qlnx_pci_attach_err; 732 if (ha->idle_chk_size[i] == 0) 733 goto qlnx_pci_attach_err; 734 735 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 736 QL_DPRINT1(ha, (dev, "idle_chk_size[%d] = 0x%08x\n", 737 i, ha->idle_chk_size[i])); 738 739 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 740 741 if (ha->idle_chk[i] == NULL) { 742 device_printf(dev, "idle_chk alloc failed\n"); 743 goto qlnx_pci_attach_err; 744 } 745 } 746 747 if (qlnx_slowpath_start(ha) != 0) { 748 749 qlnx_mdelay(__func__, 1000); 750 qlnx_trigger_dump(ha); 751 752 goto qlnx_pci_attach_err0; 753 } else 754 ha->flags.slowpath_start = 1; 755 756 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 757 qlnx_mdelay(__func__, 1000); 758 qlnx_trigger_dump(ha); 759 760 goto qlnx_pci_attach_err0; 761 } 762 
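/*
 * The management firmware (MFW) version comes back from
 * qlnx_get_mfw_version() packed into a single 32-bit word; the
 * snprintf() below unpacks it one byte per field, most significant
 * byte first, as major.minor.revision.engineering (for example, a
 * hypothetical value of 0x08210a00 would print as "8.33.10.0").
 */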
763 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 764 qlnx_mdelay(__func__, 1000); 765 qlnx_trigger_dump(ha); 766 767 goto qlnx_pci_attach_err0; 768 } 769 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 770 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 771 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 772 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 773 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 774 FW_ENGINEERING_VERSION); 775 776 QL_DPRINT1(ha, (dev, "%s: STORM_FW version %s MFW version %s\n", 777 __func__, ha->stormfw_ver, ha->mfw_ver)); 778 779 qlnx_init_ifnet(dev, ha); 780 781 /* 782 * add sysctls 783 */ 784 qlnx_add_sysctls(ha); 785 786 qlnx_pci_attach_err0: 787 /* 788 * create ioctl device interface 789 */ 790 if (qlnx_make_cdev(ha)) { 791 device_printf(dev, "%s: qlnx_make_cdev failed\n", __func__); 792 goto qlnx_pci_attach_err; 793 } 794 795 QL_DPRINT2(ha, (dev, "%s: success\n", __func__)); 796 797 return (0); 798 799 qlnx_pci_attach_err: 800 801 qlnx_release(ha); 802 803 return (ENXIO); 804 } 805 806 /* 807 * Name: qlnx_pci_detach 808 * Function: Unhooks the device from the operating system 809 */ 810 static int 811 qlnx_pci_detach(device_t dev) 812 { 813 qlnx_host_t *ha = NULL; 814 815 if ((ha = device_get_softc(dev)) == NULL) { 816 device_printf(dev, "cannot get softc\n"); 817 return (ENOMEM); 818 } 819 820 QLNX_LOCK(ha); 821 qlnx_stop(ha); 822 QLNX_UNLOCK(ha); 823 824 qlnx_release(ha); 825 826 return (0); 827 } 828 829 static int 830 qlnx_init_hw(qlnx_host_t *ha) 831 { 832 int rval = 0; 833 struct ecore_hw_prepare_params params; 834 835 ecore_init_struct(&ha->cdev); 836 837 /* ha->dp_module = ECORE_MSG_PROBE | 838 ECORE_MSG_INTR | 839 ECORE_MSG_SP | 840 ECORE_MSG_LINK | 841 ECORE_MSG_SPQ | 842 ECORE_MSG_RDMA; 843 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 844 ha->dp_level = ECORE_LEVEL_NOTICE; 845 846 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 847 848 ha->cdev.regview = ha->pci_reg; 849 ha->cdev.doorbells = ha->pci_dbells; 850 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 851 ha->cdev.db_size = ha->dbells_size; 852 853 bzero(&params, sizeof (struct ecore_hw_prepare_params)); 854 855 ha->personality = ECORE_PCI_DEFAULT; 856 857 params.personality = ha->personality; 858 859 params.drv_resc_alloc = false; 860 params.chk_reg_fifo = false; 861 params.initiate_pf_flr = true; 862 params.epoch = 0; 863 864 ecore_hw_prepare(&ha->cdev, &params); 865 866 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 867 868 return (rval); 869 } 870 871 static void 872 qlnx_release(qlnx_host_t *ha) 873 { 874 device_t dev; 875 int i; 876 877 dev = ha->pci_dev; 878 879 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 880 881 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 882 if (ha->idle_chk[i] != NULL) { 883 free(ha->idle_chk[i], M_QLNXBUF); 884 ha->idle_chk[i] = NULL; 885 } 886 887 if (ha->grcdump[i] != NULL) { 888 free(ha->grcdump[i], M_QLNXBUF); 889 ha->grcdump[i] = NULL; 890 } 891 } 892 893 if (ha->flags.callout_init) 894 callout_drain(&ha->qlnx_callout); 895 896 if (ha->flags.slowpath_start) { 897 qlnx_slowpath_stop(ha); 898 } 899 900 ecore_hw_remove(&ha->cdev); 901 902 qlnx_del_cdev(ha); 903 904 if (ha->ifp != NULL) 905 ether_ifdetach(ha->ifp); 906 907 qlnx_free_tx_dma_tag(ha); 908 909 qlnx_free_rx_dma_tag(ha); 910 911 qlnx_free_parent_dma_tag(ha); 912 913 for (i = 0; i < ha->num_rss; i++) { 914 struct qlnx_fastpath *fp = &ha->fp_array[i]; 915 916 if (ha->irq_vec[i].handle) { 917 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 918 
ha->irq_vec[i].handle); 919 } 920 921 if (ha->irq_vec[i].irq) { 922 (void)bus_release_resource(dev, SYS_RES_IRQ, 923 ha->irq_vec[i].irq_rid, 924 ha->irq_vec[i].irq); 925 } 926 927 qlnx_free_tx_br(ha, fp); 928 } 929 qlnx_destroy_fp_taskqueues(ha); 930 931 for (i = 0; i < ha->cdev.num_hwfns; i++) { 932 if (ha->sp_handle[i]) 933 (void)bus_teardown_intr(dev, ha->sp_irq[i], 934 ha->sp_handle[i]); 935 936 if (ha->sp_irq[i]) 937 (void) bus_release_resource(dev, SYS_RES_IRQ, 938 ha->sp_irq_rid[i], ha->sp_irq[i]); 939 } 940 941 qlnx_destroy_sp_taskqueues(ha); 942 943 if (ha->msix_count) 944 pci_release_msi(dev); 945 946 if (ha->flags.lock_init) { 947 mtx_destroy(&ha->tx_lock); 948 mtx_destroy(&ha->hw_lock); 949 } 950 951 if (ha->pci_reg) 952 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 953 ha->pci_reg); 954 955 if (ha->pci_dbells) 956 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 957 ha->pci_dbells); 958 959 if (ha->msix_bar) 960 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 961 ha->msix_bar); 962 963 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 964 return; 965 } 966 967 static void 968 qlnx_trigger_dump(qlnx_host_t *ha) 969 { 970 int i; 971 972 if (ha->ifp != NULL) 973 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 974 975 QL_DPRINT2(ha, (ha->pci_dev, "%s: start\n", __func__)); 976 977 for (i = 0; i < ha->cdev.num_hwfns; i++) { 978 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 979 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 980 } 981 982 QL_DPRINT2(ha, (ha->pci_dev, "%s: end\n", __func__)); 983 984 return; 985 } 986 987 static int 988 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 989 { 990 int err, ret = 0; 991 qlnx_host_t *ha; 992 993 err = sysctl_handle_int(oidp, &ret, 0, req); 994 995 if (err || !req->newptr) 996 return (err); 997 998 if (ret == 1) { 999 ha = (qlnx_host_t *)arg1; 1000 qlnx_trigger_dump(ha); 1001 } 1002 return (err); 1003 } 1004 1005 static int 1006 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1007 { 1008 int err, i, ret = 0, usecs = 0; 1009 qlnx_host_t *ha; 1010 struct ecore_hwfn *p_hwfn; 1011 struct qlnx_fastpath *fp; 1012 1013 err = sysctl_handle_int(oidp, &usecs, 0, req); 1014 1015 if (err || !req->newptr || !usecs || (usecs > 255)) 1016 return (err); 1017 1018 ha = (qlnx_host_t *)arg1; 1019 1020 for (i = 0; i < ha->num_rss; i++) { 1021 1022 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1023 1024 fp = &ha->fp_array[i]; 1025 1026 if (fp->txq[0]->handle != NULL) { 1027 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1028 (uint16_t)usecs, fp->txq[0]->handle); 1029 } 1030 } 1031 1032 if (!ret) 1033 ha->tx_coalesce_usecs = (uint8_t)usecs; 1034 1035 return (err); 1036 } 1037 1038 static int 1039 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1040 { 1041 int err, i, ret = 0, usecs = 0; 1042 qlnx_host_t *ha; 1043 struct ecore_hwfn *p_hwfn; 1044 struct qlnx_fastpath *fp; 1045 1046 err = sysctl_handle_int(oidp, &usecs, 0, req); 1047 1048 if (err || !req->newptr || !usecs || (usecs > 255)) 1049 return (err); 1050 1051 ha = (qlnx_host_t *)arg1; 1052 1053 for (i = 0; i < ha->num_rss; i++) { 1054 1055 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1056 1057 fp = &ha->fp_array[i]; 1058 1059 if (fp->rxq->handle != NULL) { 1060 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1061 0, fp->rxq->handle); 1062 } 1063 } 1064 1065 if (!ret) 1066 ha->rx_coalesce_usecs = (uint8_t)usecs; 1067 1068 return (err); 1069 } 1070 1071 static void 1072 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1073 { 1074 struct sysctl_ctx_list 
*ctx; 1075 struct sysctl_oid_list *children; 1076 struct sysctl_oid *ctx_oid; 1077 1078 ctx = device_get_sysctl_ctx(ha->pci_dev); 1079 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1080 1081 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1082 CTLFLAG_RD, NULL, "spstat"); 1083 children = SYSCTL_CHILDREN(ctx_oid); 1084 1085 SYSCTL_ADD_QUAD(ctx, children, 1086 OID_AUTO, "sp_interrupts", 1087 CTLFLAG_RD, &ha->sp_interrupts, 1088 "No. of slowpath interrupts"); 1089 1090 return; 1091 } 1092 1093 static void 1094 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1095 { 1096 struct sysctl_ctx_list *ctx; 1097 struct sysctl_oid_list *children; 1098 struct sysctl_oid_list *node_children; 1099 struct sysctl_oid *ctx_oid; 1100 int i, j; 1101 uint8_t name_str[16]; 1102 1103 ctx = device_get_sysctl_ctx(ha->pci_dev); 1104 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1105 1106 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1107 CTLFLAG_RD, NULL, "fpstat"); 1108 children = SYSCTL_CHILDREN(ctx_oid); 1109 1110 for (i = 0; i < ha->num_rss; i++) { 1111 1112 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1113 snprintf(name_str, sizeof(name_str), "%d", i); 1114 1115 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1116 CTLFLAG_RD, NULL, name_str); 1117 node_children = SYSCTL_CHILDREN(ctx_oid); 1118 1119 /* Tx Related */ 1120 1121 SYSCTL_ADD_QUAD(ctx, node_children, 1122 OID_AUTO, "tx_pkts_processed", 1123 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1124 "No. of packets processed for transmission"); 1125 1126 SYSCTL_ADD_QUAD(ctx, node_children, 1127 OID_AUTO, "tx_pkts_freed", 1128 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1129 "No. of freed packets"); 1130 1131 SYSCTL_ADD_QUAD(ctx, node_children, 1132 OID_AUTO, "tx_pkts_transmitted", 1133 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1134 "No. of transmitted packets"); 1135 1136 SYSCTL_ADD_QUAD(ctx, node_children, 1137 OID_AUTO, "tx_pkts_completed", 1138 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1139 "No. 
of transmit completions"); 1140 1141 SYSCTL_ADD_QUAD(ctx, node_children, 1142 OID_AUTO, "tx_lso_wnd_min_len", 1143 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1144 "tx_lso_wnd_min_len"); 1145 1146 SYSCTL_ADD_QUAD(ctx, node_children, 1147 OID_AUTO, "tx_defrag", 1148 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1149 "tx_defrag"); 1150 1151 SYSCTL_ADD_QUAD(ctx, node_children, 1152 OID_AUTO, "tx_nsegs_gt_elem_left", 1153 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1154 "tx_nsegs_gt_elem_left"); 1155 1156 SYSCTL_ADD_UINT(ctx, node_children, 1157 OID_AUTO, "tx_tso_max_nsegs", 1158 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1159 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1160 1161 SYSCTL_ADD_UINT(ctx, node_children, 1162 OID_AUTO, "tx_tso_min_nsegs", 1163 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1164 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1165 1166 SYSCTL_ADD_UINT(ctx, node_children, 1167 OID_AUTO, "tx_tso_max_pkt_len", 1168 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1169 ha->fp_array[i].tx_tso_max_pkt_len, 1170 "tx_tso_max_pkt_len"); 1171 1172 SYSCTL_ADD_UINT(ctx, node_children, 1173 OID_AUTO, "tx_tso_min_pkt_len", 1174 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1175 ha->fp_array[i].tx_tso_min_pkt_len, 1176 "tx_tso_min_pkt_len"); 1177 1178 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1179 1180 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1181 snprintf(name_str, sizeof(name_str), 1182 "tx_pkts_nseg_%02d", (j+1)); 1183 1184 SYSCTL_ADD_QUAD(ctx, node_children, 1185 OID_AUTO, name_str, CTLFLAG_RD, 1186 &ha->fp_array[i].tx_pkts[j], name_str); 1187 } 1188 1189 SYSCTL_ADD_QUAD(ctx, node_children, 1190 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1191 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1192 "err_tx_nsegs_gt_elem_left"); 1193 1194 SYSCTL_ADD_QUAD(ctx, node_children, 1195 OID_AUTO, "err_tx_dmamap_create", 1196 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1197 "err_tx_dmamap_create"); 1198 1199 SYSCTL_ADD_QUAD(ctx, node_children, 1200 OID_AUTO, "err_tx_defrag_dmamap_load", 1201 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1202 "err_tx_defrag_dmamap_load"); 1203 1204 SYSCTL_ADD_QUAD(ctx, node_children, 1205 OID_AUTO, "err_tx_non_tso_max_seg", 1206 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1207 "err_tx_non_tso_max_seg"); 1208 1209 SYSCTL_ADD_QUAD(ctx, node_children, 1210 OID_AUTO, "err_tx_dmamap_load", 1211 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1212 "err_tx_dmamap_load"); 1213 1214 SYSCTL_ADD_QUAD(ctx, node_children, 1215 OID_AUTO, "err_tx_defrag", 1216 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1217 "err_tx_defrag"); 1218 1219 SYSCTL_ADD_QUAD(ctx, node_children, 1220 OID_AUTO, "err_tx_free_pkt_null", 1221 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1222 "err_tx_free_pkt_null"); 1223 1224 SYSCTL_ADD_QUAD(ctx, node_children, 1225 OID_AUTO, "err_tx_cons_idx_conflict", 1226 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1227 "err_tx_cons_idx_conflict"); 1228 1229 #ifdef QLNX_TRACE_LRO_CNT 1230 SYSCTL_ADD_QUAD(ctx, node_children, 1231 OID_AUTO, "lro_cnt_64", 1232 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1233 "lro_cnt_64"); 1234 1235 SYSCTL_ADD_QUAD(ctx, node_children, 1236 OID_AUTO, "lro_cnt_128", 1237 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1238 "lro_cnt_128"); 1239 1240 SYSCTL_ADD_QUAD(ctx, node_children, 1241 OID_AUTO, "lro_cnt_256", 1242 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1243 "lro_cnt_256"); 1244 1245 SYSCTL_ADD_QUAD(ctx, node_children, 1246 OID_AUTO, "lro_cnt_512", 
1247 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1248 "lro_cnt_512"); 1249 1250 SYSCTL_ADD_QUAD(ctx, node_children, 1251 OID_AUTO, "lro_cnt_1024", 1252 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1253 "lro_cnt_1024"); 1254 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 1255 1256 /* Rx Related */ 1257 1258 SYSCTL_ADD_QUAD(ctx, node_children, 1259 OID_AUTO, "rx_pkts", 1260 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1261 "No. of received packets"); 1262 1263 SYSCTL_ADD_QUAD(ctx, node_children, 1264 OID_AUTO, "tpa_start", 1265 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1266 "No. of tpa_start packets"); 1267 1268 SYSCTL_ADD_QUAD(ctx, node_children, 1269 OID_AUTO, "tpa_cont", 1270 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1271 "No. of tpa_cont packets"); 1272 1273 SYSCTL_ADD_QUAD(ctx, node_children, 1274 OID_AUTO, "tpa_end", 1275 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1276 "No. of tpa_end packets"); 1277 1278 SYSCTL_ADD_QUAD(ctx, node_children, 1279 OID_AUTO, "err_m_getcl", 1280 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1281 "err_m_getcl"); 1282 1283 SYSCTL_ADD_QUAD(ctx, node_children, 1284 OID_AUTO, "err_m_getjcl", 1285 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1286 "err_m_getjcl"); 1287 1288 SYSCTL_ADD_QUAD(ctx, node_children, 1289 OID_AUTO, "err_rx_hw_errors", 1290 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1291 "err_rx_hw_errors"); 1292 1293 SYSCTL_ADD_QUAD(ctx, node_children, 1294 OID_AUTO, "err_rx_alloc_errors", 1295 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1296 "err_rx_alloc_errors"); 1297 } 1298 1299 return; 1300 } 1301 1302 static void 1303 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1304 { 1305 struct sysctl_ctx_list *ctx; 1306 struct sysctl_oid_list *children; 1307 struct sysctl_oid *ctx_oid; 1308 1309 ctx = device_get_sysctl_ctx(ha->pci_dev); 1310 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1311 1312 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1313 CTLFLAG_RD, NULL, "hwstat"); 1314 children = SYSCTL_CHILDREN(ctx_oid); 1315 1316 SYSCTL_ADD_QUAD(ctx, children, 1317 OID_AUTO, "no_buff_discards", 1318 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1319 "No. of packets discarded due to lack of buffer"); 1320 1321 SYSCTL_ADD_QUAD(ctx, children, 1322 OID_AUTO, "packet_too_big_discard", 1323 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1324 "No. 
of packets discarded because packet was too big"); 1325 1326 SYSCTL_ADD_QUAD(ctx, children, 1327 OID_AUTO, "ttl0_discard", 1328 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1329 "ttl0_discard"); 1330 1331 SYSCTL_ADD_QUAD(ctx, children, 1332 OID_AUTO, "rx_ucast_bytes", 1333 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1334 "rx_ucast_bytes"); 1335 1336 SYSCTL_ADD_QUAD(ctx, children, 1337 OID_AUTO, "rx_mcast_bytes", 1338 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1339 "rx_mcast_bytes"); 1340 1341 SYSCTL_ADD_QUAD(ctx, children, 1342 OID_AUTO, "rx_bcast_bytes", 1343 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1344 "rx_bcast_bytes"); 1345 1346 SYSCTL_ADD_QUAD(ctx, children, 1347 OID_AUTO, "rx_ucast_pkts", 1348 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1349 "rx_ucast_pkts"); 1350 1351 SYSCTL_ADD_QUAD(ctx, children, 1352 OID_AUTO, "rx_mcast_pkts", 1353 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1354 "rx_mcast_pkts"); 1355 1356 SYSCTL_ADD_QUAD(ctx, children, 1357 OID_AUTO, "rx_bcast_pkts", 1358 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1359 "rx_bcast_pkts"); 1360 1361 SYSCTL_ADD_QUAD(ctx, children, 1362 OID_AUTO, "mftag_filter_discards", 1363 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1364 "mftag_filter_discards"); 1365 1366 SYSCTL_ADD_QUAD(ctx, children, 1367 OID_AUTO, "mac_filter_discards", 1368 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1369 "mac_filter_discards"); 1370 1371 SYSCTL_ADD_QUAD(ctx, children, 1372 OID_AUTO, "tx_ucast_bytes", 1373 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1374 "tx_ucast_bytes"); 1375 1376 SYSCTL_ADD_QUAD(ctx, children, 1377 OID_AUTO, "tx_mcast_bytes", 1378 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1379 "tx_mcast_bytes"); 1380 1381 SYSCTL_ADD_QUAD(ctx, children, 1382 OID_AUTO, "tx_bcast_bytes", 1383 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1384 "tx_bcast_bytes"); 1385 1386 SYSCTL_ADD_QUAD(ctx, children, 1387 OID_AUTO, "tx_ucast_pkts", 1388 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1389 "tx_ucast_pkts"); 1390 1391 SYSCTL_ADD_QUAD(ctx, children, 1392 OID_AUTO, "tx_mcast_pkts", 1393 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1394 "tx_mcast_pkts"); 1395 1396 SYSCTL_ADD_QUAD(ctx, children, 1397 OID_AUTO, "tx_bcast_pkts", 1398 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1399 "tx_bcast_pkts"); 1400 1401 SYSCTL_ADD_QUAD(ctx, children, 1402 OID_AUTO, "tx_err_drop_pkts", 1403 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1404 "tx_err_drop_pkts"); 1405 1406 SYSCTL_ADD_QUAD(ctx, children, 1407 OID_AUTO, "tpa_coalesced_pkts", 1408 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1409 "tpa_coalesced_pkts"); 1410 1411 SYSCTL_ADD_QUAD(ctx, children, 1412 OID_AUTO, "tpa_coalesced_events", 1413 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1414 "tpa_coalesced_events"); 1415 1416 SYSCTL_ADD_QUAD(ctx, children, 1417 OID_AUTO, "tpa_aborts_num", 1418 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1419 "tpa_aborts_num"); 1420 1421 SYSCTL_ADD_QUAD(ctx, children, 1422 OID_AUTO, "tpa_not_coalesced_pkts", 1423 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1424 "tpa_not_coalesced_pkts"); 1425 1426 SYSCTL_ADD_QUAD(ctx, children, 1427 OID_AUTO, "tpa_coalesced_bytes", 1428 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1429 "tpa_coalesced_bytes"); 1430 1431 SYSCTL_ADD_QUAD(ctx, children, 1432 OID_AUTO, "rx_64_byte_packets", 1433 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1434 "rx_64_byte_packets"); 1435 1436 SYSCTL_ADD_QUAD(ctx, children, 1437 OID_AUTO, 
"rx_65_to_127_byte_packets", 1438 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1439 "rx_65_to_127_byte_packets"); 1440 1441 SYSCTL_ADD_QUAD(ctx, children, 1442 OID_AUTO, "rx_128_to_255_byte_packets", 1443 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1444 "rx_128_to_255_byte_packets"); 1445 1446 SYSCTL_ADD_QUAD(ctx, children, 1447 OID_AUTO, "rx_256_to_511_byte_packets", 1448 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1449 "rx_256_to_511_byte_packets"); 1450 1451 SYSCTL_ADD_QUAD(ctx, children, 1452 OID_AUTO, "rx_512_to_1023_byte_packets", 1453 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1454 "rx_512_to_1023_byte_packets"); 1455 1456 SYSCTL_ADD_QUAD(ctx, children, 1457 OID_AUTO, "rx_1024_to_1518_byte_packets", 1458 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1459 "rx_1024_to_1518_byte_packets"); 1460 1461 SYSCTL_ADD_QUAD(ctx, children, 1462 OID_AUTO, "rx_1519_to_1522_byte_packets", 1463 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1464 "rx_1519_to_1522_byte_packets"); 1465 1466 SYSCTL_ADD_QUAD(ctx, children, 1467 OID_AUTO, "rx_1523_to_2047_byte_packets", 1468 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1469 "rx_1523_to_2047_byte_packets"); 1470 1471 SYSCTL_ADD_QUAD(ctx, children, 1472 OID_AUTO, "rx_2048_to_4095_byte_packets", 1473 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1474 "rx_2048_to_4095_byte_packets"); 1475 1476 SYSCTL_ADD_QUAD(ctx, children, 1477 OID_AUTO, "rx_4096_to_9216_byte_packets", 1478 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1479 "rx_4096_to_9216_byte_packets"); 1480 1481 SYSCTL_ADD_QUAD(ctx, children, 1482 OID_AUTO, "rx_9217_to_16383_byte_packets", 1483 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1484 "rx_9217_to_16383_byte_packets"); 1485 1486 SYSCTL_ADD_QUAD(ctx, children, 1487 OID_AUTO, "rx_crc_errors", 1488 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1489 "rx_crc_errors"); 1490 1491 SYSCTL_ADD_QUAD(ctx, children, 1492 OID_AUTO, "rx_mac_crtl_frames", 1493 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1494 "rx_mac_crtl_frames"); 1495 1496 SYSCTL_ADD_QUAD(ctx, children, 1497 OID_AUTO, "rx_pause_frames", 1498 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1499 "rx_pause_frames"); 1500 1501 SYSCTL_ADD_QUAD(ctx, children, 1502 OID_AUTO, "rx_pfc_frames", 1503 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1504 "rx_pfc_frames"); 1505 1506 SYSCTL_ADD_QUAD(ctx, children, 1507 OID_AUTO, "rx_align_errors", 1508 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1509 "rx_align_errors"); 1510 1511 SYSCTL_ADD_QUAD(ctx, children, 1512 OID_AUTO, "rx_carrier_errors", 1513 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1514 "rx_carrier_errors"); 1515 1516 SYSCTL_ADD_QUAD(ctx, children, 1517 OID_AUTO, "rx_oversize_packets", 1518 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 1519 "rx_oversize_packets"); 1520 1521 SYSCTL_ADD_QUAD(ctx, children, 1522 OID_AUTO, "rx_jabbers", 1523 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 1524 "rx_jabbers"); 1525 1526 SYSCTL_ADD_QUAD(ctx, children, 1527 OID_AUTO, "rx_undersize_packets", 1528 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 1529 "rx_undersize_packets"); 1530 1531 SYSCTL_ADD_QUAD(ctx, children, 1532 OID_AUTO, "rx_fragments", 1533 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 1534 "rx_fragments"); 1535 1536 SYSCTL_ADD_QUAD(ctx, children, 1537 OID_AUTO, "tx_64_byte_packets", 1538 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 1539 
"tx_64_byte_packets"); 1540 1541 SYSCTL_ADD_QUAD(ctx, children, 1542 OID_AUTO, "tx_65_to_127_byte_packets", 1543 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 1544 "tx_65_to_127_byte_packets"); 1545 1546 SYSCTL_ADD_QUAD(ctx, children, 1547 OID_AUTO, "tx_128_to_255_byte_packets", 1548 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 1549 "tx_128_to_255_byte_packets"); 1550 1551 SYSCTL_ADD_QUAD(ctx, children, 1552 OID_AUTO, "tx_256_to_511_byte_packets", 1553 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 1554 "tx_256_to_511_byte_packets"); 1555 1556 SYSCTL_ADD_QUAD(ctx, children, 1557 OID_AUTO, "tx_512_to_1023_byte_packets", 1558 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 1559 "tx_512_to_1023_byte_packets"); 1560 1561 SYSCTL_ADD_QUAD(ctx, children, 1562 OID_AUTO, "tx_1024_to_1518_byte_packets", 1563 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 1564 "tx_1024_to_1518_byte_packets"); 1565 1566 SYSCTL_ADD_QUAD(ctx, children, 1567 OID_AUTO, "tx_1519_to_2047_byte_packets", 1568 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 1569 "tx_1519_to_2047_byte_packets"); 1570 1571 SYSCTL_ADD_QUAD(ctx, children, 1572 OID_AUTO, "tx_2048_to_4095_byte_packets", 1573 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 1574 "tx_2048_to_4095_byte_packets"); 1575 1576 SYSCTL_ADD_QUAD(ctx, children, 1577 OID_AUTO, "tx_4096_to_9216_byte_packets", 1578 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 1579 "tx_4096_to_9216_byte_packets"); 1580 1581 SYSCTL_ADD_QUAD(ctx, children, 1582 OID_AUTO, "tx_9217_to_16383_byte_packets", 1583 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 1584 "tx_9217_to_16383_byte_packets"); 1585 1586 SYSCTL_ADD_QUAD(ctx, children, 1587 OID_AUTO, "tx_pause_frames", 1588 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 1589 "tx_pause_frames"); 1590 1591 SYSCTL_ADD_QUAD(ctx, children, 1592 OID_AUTO, "tx_pfc_frames", 1593 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 1594 "tx_pfc_frames"); 1595 1596 SYSCTL_ADD_QUAD(ctx, children, 1597 OID_AUTO, "tx_lpi_entry_count", 1598 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 1599 "tx_lpi_entry_count"); 1600 1601 SYSCTL_ADD_QUAD(ctx, children, 1602 OID_AUTO, "tx_total_collisions", 1603 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 1604 "tx_total_collisions"); 1605 1606 SYSCTL_ADD_QUAD(ctx, children, 1607 OID_AUTO, "brb_truncates", 1608 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 1609 "brb_truncates"); 1610 1611 SYSCTL_ADD_QUAD(ctx, children, 1612 OID_AUTO, "brb_discards", 1613 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 1614 "brb_discards"); 1615 1616 SYSCTL_ADD_QUAD(ctx, children, 1617 OID_AUTO, "rx_mac_bytes", 1618 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 1619 "rx_mac_bytes"); 1620 1621 SYSCTL_ADD_QUAD(ctx, children, 1622 OID_AUTO, "rx_mac_uc_packets", 1623 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 1624 "rx_mac_uc_packets"); 1625 1626 SYSCTL_ADD_QUAD(ctx, children, 1627 OID_AUTO, "rx_mac_mc_packets", 1628 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 1629 "rx_mac_mc_packets"); 1630 1631 SYSCTL_ADD_QUAD(ctx, children, 1632 OID_AUTO, "rx_mac_bc_packets", 1633 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 1634 "rx_mac_bc_packets"); 1635 1636 SYSCTL_ADD_QUAD(ctx, children, 1637 OID_AUTO, "rx_mac_frames_ok", 1638 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 1639 "rx_mac_frames_ok"); 1640 1641 SYSCTL_ADD_QUAD(ctx, children, 1642 OID_AUTO, "tx_mac_bytes", 1643 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 1644 "tx_mac_bytes"); 1645 1646 SYSCTL_ADD_QUAD(ctx, children, 1647 OID_AUTO, "tx_mac_uc_packets", 1648 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 1649 "tx_mac_uc_packets"); 1650 1651 SYSCTL_ADD_QUAD(ctx, children, 1652 OID_AUTO, "tx_mac_mc_packets", 1653 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 1654 "tx_mac_mc_packets"); 1655 1656 SYSCTL_ADD_QUAD(ctx, children, 1657 OID_AUTO, "tx_mac_bc_packets", 1658 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 1659 "tx_mac_bc_packets"); 1660 1661 SYSCTL_ADD_QUAD(ctx, children, 1662 OID_AUTO, "tx_mac_ctrl_frames", 1663 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 1664 "tx_mac_ctrl_frames"); 1665 return; 1666 } 1667 1668 static void 1669 qlnx_add_sysctls(qlnx_host_t *ha) 1670 { 1671 device_t dev = ha->pci_dev; 1672 struct sysctl_ctx_list *ctx; 1673 struct sysctl_oid_list *children; 1674 1675 ctx = device_get_sysctl_ctx(dev); 1676 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 1677 1678 qlnx_add_fp_stats_sysctls(ha); 1679 qlnx_add_sp_stats_sysctls(ha); 1680 qlnx_add_hw_stats_sysctls(ha); 1681 1682 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 1683 CTLFLAG_RD, qlnx_ver_str, 0, 1684 "Driver Version"); 1685 1686 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 1687 CTLFLAG_RD, ha->stormfw_ver, 0, 1688 "STORM Firmware Version"); 1689 1690 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 1691 CTLFLAG_RD, ha->mfw_ver, 0, 1692 "Management Firmware Version"); 1693 1694 SYSCTL_ADD_UINT(ctx, children, 1695 OID_AUTO, "personality", CTLFLAG_RD, 1696 &ha->personality, ha->personality, 1697 "\tpersonality = 0 => Ethernet Only\n" 1698 "\tpersonality = 3 => Ethernet and RoCE\n" 1699 "\tpersonality = 4 => Ethernet and iWARP\n" 1700 "\tpersonality = 6 => Default in Shared Memory\n"); 1701 1702 ha->dbg_level = 0; 1703 1704 SYSCTL_ADD_UINT(ctx, children, 1705 OID_AUTO, "debug", CTLFLAG_RW, 1706 &ha->dbg_level, ha->dbg_level, "Debug Level"); 1707 1708 ha->dp_level = 0; 1709 SYSCTL_ADD_UINT(ctx, children, 1710 OID_AUTO, "dp_level", CTLFLAG_RW, 1711 &ha->dp_level, ha->dp_level, "DP Level"); 1712 1713 ha->dp_module = 0; 1714 SYSCTL_ADD_UINT(ctx, children, 1715 OID_AUTO, "dp_module", CTLFLAG_RW, 1716 &ha->dp_module, ha->dp_module, "DP Module"); 1717 1718 ha->err_inject = 0; 1719 1720 SYSCTL_ADD_UINT(ctx, children, 1721 OID_AUTO, "err_inject", CTLFLAG_RW, 1722 &ha->err_inject, ha->err_inject, "Error Inject"); 1723 1724 ha->storm_stats_enable = 0; 1725 1726 SYSCTL_ADD_UINT(ctx, children, 1727 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 1728 &ha->storm_stats_enable, ha->storm_stats_enable, 1729 "Enable Storm Statistics Gathering"); 1730 1731 ha->storm_stats_index = 0; 1732 1733 SYSCTL_ADD_UINT(ctx, children, 1734 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 1735 &ha->storm_stats_index, ha->storm_stats_index, 1736 "Enable Storm Statistics Gathering Current Index"); 1737 1738 ha->grcdump_taken = 0; 1739 SYSCTL_ADD_UINT(ctx, children, 1740 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 1741 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken"); 1742 1743 ha->idle_chk_taken = 0; 1744 SYSCTL_ADD_UINT(ctx, children, 1745 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 1746 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken"); 1747 1748 SYSCTL_ADD_UINT(ctx, children, 1749 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 1750 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 1751 "rx_coalesce_usecs"); 1752 1753 SYSCTL_ADD_UINT(ctx, children, 1754 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 1755 
&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 1756 "tx_coalesce_usecs"); 1757 1758 ha->rx_pkt_threshold = 32; 1759 SYSCTL_ADD_UINT(ctx, children, 1760 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 1761 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 1762 "No. of Rx Pkts to process at a time"); 1763 1764 ha->rx_jumbo_buf_eq_mtu = 0; 1765 SYSCTL_ADD_UINT(ctx, children, 1766 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 1767 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 1768 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 1769 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 1770 1771 SYSCTL_ADD_PROC(ctx, children, 1772 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW, 1773 (void *)ha, 0, 1774 qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 1775 1776 SYSCTL_ADD_PROC(ctx, children, 1777 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1778 (void *)ha, 0, 1779 qlnx_set_rx_coalesce, "I", 1780 "rx interrupt coalesce period microseconds"); 1781 1782 SYSCTL_ADD_PROC(ctx, children, 1783 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1784 (void *)ha, 0, 1785 qlnx_set_tx_coalesce, "I", 1786 "tx interrupt coalesce period microseconds"); 1787 1788 SYSCTL_ADD_QUAD(ctx, children, 1789 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 1790 &ha->err_illegal_intr, "err_illegal_intr"); 1791 1792 SYSCTL_ADD_QUAD(ctx, children, 1793 OID_AUTO, "err_fp_null", CTLFLAG_RD, 1794 &ha->err_fp_null, "err_fp_null"); 1795 1796 SYSCTL_ADD_QUAD(ctx, children, 1797 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 1798 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 1799 return; 1800 } 1801 1802 1803 1804 /***************************************************************************** 1805 * Operating System Network Interface Functions 1806 *****************************************************************************/ 1807 1808 static void 1809 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 1810 { 1811 uint16_t device_id; 1812 struct ifnet *ifp; 1813 1814 ifp = ha->ifp = if_alloc(IFT_ETHER); 1815 1816 if (ifp == NULL) 1817 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 1818 1819 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1820 1821 device_id = pci_get_device(ha->pci_dev); 1822 1823 #if __FreeBSD_version >= 1000000 1824 1825 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 1826 ifp->if_baudrate = IF_Gbps(40); 1827 else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) 1828 ifp->if_baudrate = IF_Gbps(25); 1829 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 1830 ifp->if_baudrate = IF_Gbps(50); 1831 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 1832 ifp->if_baudrate = IF_Gbps(100); 1833 1834 ifp->if_capabilities = IFCAP_LINKSTATE; 1835 #else 1836 ifp->if_mtu = ETHERMTU; 1837 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 1838 1839 #endif /* #if __FreeBSD_version >= 1000000 */ 1840 1841 ifp->if_init = qlnx_init; 1842 ifp->if_softc = ha; 1843 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1844 ifp->if_ioctl = qlnx_ioctl; 1845 ifp->if_transmit = qlnx_transmit; 1846 ifp->if_qflush = qlnx_qflush; 1847 1848 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 1849 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 1850 IFQ_SET_READY(&ifp->if_snd); 1851 1852 #if __FreeBSD_version >= 1100036 1853 if_setgetcounterfn(ifp, qlnx_get_counter); 1854 #endif 1855 1856 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1857 1858 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 1859 ether_ifattach(ifp, ha->primary_mac); 1860 
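/*
 * ether_ifattach() above registered the adapter's permanent MAC
 * address with the stack; the bcopy() below reads the interface's
 * link-level address back into ha->primary_mac so the driver's cached
 * copy matches whatever address the ifnet now publishes.
 */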
bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 1861 1862 ifp->if_capabilities = IFCAP_HWCSUM; 1863 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 1864 1865 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1866 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1867 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 1868 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1869 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 1870 ifp->if_capabilities |= IFCAP_TSO4; 1871 ifp->if_capabilities |= IFCAP_TSO6; 1872 ifp->if_capabilities |= IFCAP_LRO; 1873 1874 ifp->if_capenable = ifp->if_capabilities; 1875 1876 ifp->if_hwassist = CSUM_IP; 1877 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 1878 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 1879 ifp->if_hwassist |= CSUM_TSO; 1880 1881 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1882 1883 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 1884 qlnx_media_status); 1885 1886 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 1887 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 1888 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 1889 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 1890 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) { 1891 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 1892 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 1893 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 1894 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 1895 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 1896 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 1897 ifmedia_add(&ha->media, 1898 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 1899 ifmedia_add(&ha->media, 1900 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 1901 ifmedia_add(&ha->media, 1902 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 1903 } 1904 1905 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 1906 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 1907 1908 1909 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 1910 1911 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 1912 1913 return; 1914 } 1915 1916 static void 1917 qlnx_init_locked(qlnx_host_t *ha) 1918 { 1919 struct ifnet *ifp = ha->ifp; 1920 1921 qlnx_stop(ha); 1922 1923 if (qlnx_load(ha) == 0) { 1924 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1925 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1926 } 1927 1928 return; 1929 } 1930 1931 static void 1932 qlnx_init(void *arg) 1933 { 1934 qlnx_host_t *ha; 1935 1936 ha = (qlnx_host_t *)arg; 1937 1938 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1939 1940 QLNX_LOCK(ha); 1941 qlnx_init_locked(ha); 1942 QLNX_UNLOCK(ha); 1943 1944 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1945 1946 return; 1947 } 1948 1949 static int 1950 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 1951 { 1952 struct ecore_filter_mcast *mcast; 1953 struct ecore_dev *cdev; 1954 int rc; 1955 1956 cdev = &ha->cdev; 1957 1958 mcast = &ha->ecore_mcast; 1959 bzero(mcast, sizeof(struct ecore_filter_mcast)); 1960 1961 if (add_mac) 1962 mcast->opcode = ECORE_FILTER_ADD; 1963 else 1964 mcast->opcode = ECORE_FILTER_REMOVE; 1965 1966 mcast->num_mc_addrs = 1; 1967 memcpy(mcast->mac, mac_addr, ETH_ALEN); 1968 1969 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 1970 1971 return (rc); 1972 } 1973 1974 static int 1975 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 1976 { 1977 int i; 1978 1979 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 1980 1981 if (QL_MAC_CMP(ha->mcast[i].addr, 
mta) == 0) 1982 return 0; /* its been already added */ 1983 } 1984 1985 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 1986 1987 if ((ha->mcast[i].addr[0] == 0) && 1988 (ha->mcast[i].addr[1] == 0) && 1989 (ha->mcast[i].addr[2] == 0) && 1990 (ha->mcast[i].addr[3] == 0) && 1991 (ha->mcast[i].addr[4] == 0) && 1992 (ha->mcast[i].addr[5] == 0)) { 1993 1994 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 1995 return (-1); 1996 1997 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 1998 ha->nmcast++; 1999 2000 return 0; 2001 } 2002 } 2003 return 0; 2004 } 2005 2006 static int 2007 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2008 { 2009 int i; 2010 2011 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2012 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2013 2014 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2015 return (-1); 2016 2017 ha->mcast[i].addr[0] = 0; 2018 ha->mcast[i].addr[1] = 0; 2019 ha->mcast[i].addr[2] = 0; 2020 ha->mcast[i].addr[3] = 0; 2021 ha->mcast[i].addr[4] = 0; 2022 ha->mcast[i].addr[5] = 0; 2023 2024 ha->nmcast--; 2025 2026 return 0; 2027 } 2028 } 2029 return 0; 2030 } 2031 2032 /* 2033 * Name: qls_hw_set_multi 2034 * Function: Sets the Multicast Addresses provided the host O.S into the 2035 * hardware (for the given interface) 2036 */ 2037 static void 2038 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2039 uint32_t add_mac) 2040 { 2041 int i; 2042 2043 for (i = 0; i < mcnt; i++) { 2044 if (add_mac) { 2045 if (qlnx_hw_add_mcast(ha, mta)) 2046 break; 2047 } else { 2048 if (qlnx_hw_del_mcast(ha, mta)) 2049 break; 2050 } 2051 2052 mta += ETHER_HDR_LEN; 2053 } 2054 return; 2055 } 2056 2057 2058 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN) 2059 static int 2060 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2061 { 2062 uint8_t mta[QLNX_MCAST_ADDRS_SIZE]; 2063 struct ifmultiaddr *ifma; 2064 int mcnt = 0; 2065 struct ifnet *ifp = ha->ifp; 2066 int ret = 0; 2067 2068 if_maddr_rlock(ifp); 2069 2070 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2071 2072 if (ifma->ifma_addr->sa_family != AF_LINK) 2073 continue; 2074 2075 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2076 break; 2077 2078 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2079 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2080 2081 mcnt++; 2082 } 2083 2084 if_maddr_runlock(ifp); 2085 2086 QLNX_LOCK(ha); 2087 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2088 QLNX_UNLOCK(ha); 2089 2090 return (ret); 2091 } 2092 2093 static int 2094 qlnx_set_promisc(qlnx_host_t *ha) 2095 { 2096 int rc = 0; 2097 uint8_t filter; 2098 2099 filter = ha->filter; 2100 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2101 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2102 2103 rc = qlnx_set_rx_accept_filter(ha, filter); 2104 return (rc); 2105 } 2106 2107 static int 2108 qlnx_set_allmulti(qlnx_host_t *ha) 2109 { 2110 int rc = 0; 2111 uint8_t filter; 2112 2113 filter = ha->filter; 2114 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2115 rc = qlnx_set_rx_accept_filter(ha, filter); 2116 2117 return (rc); 2118 } 2119 2120 2121 static int 2122 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2123 { 2124 int ret = 0, mask; 2125 struct ifreq *ifr = (struct ifreq *)data; 2126 struct ifaddr *ifa = (struct ifaddr *)data; 2127 qlnx_host_t *ha; 2128 2129 ha = (qlnx_host_t *)ifp->if_softc; 2130 2131 switch (cmd) { 2132 case SIOCSIFADDR: 2133 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 2134 __func__, cmd)); 2135 2136 if (ifa->ifa_addr->sa_family == AF_INET) { 2137 ifp->if_flags |= IFF_UP; 2138 if 
(!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2139 QLNX_LOCK(ha); 2140 qlnx_init_locked(ha); 2141 QLNX_UNLOCK(ha); 2142 } 2143 QL_DPRINT4(ha, (ha->pci_dev, 2144 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2145 __func__, cmd, 2146 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 2147 2148 arp_ifinit(ifp, ifa); 2149 } else { 2150 ether_ioctl(ifp, cmd, data); 2151 } 2152 break; 2153 2154 case SIOCSIFMTU: 2155 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 2156 __func__, cmd)); 2157 2158 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2159 ret = EINVAL; 2160 } else { 2161 QLNX_LOCK(ha); 2162 ifp->if_mtu = ifr->ifr_mtu; 2163 ha->max_frame_size = 2164 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2165 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2166 qlnx_init_locked(ha); 2167 } 2168 2169 QLNX_UNLOCK(ha); 2170 } 2171 2172 break; 2173 2174 case SIOCSIFFLAGS: 2175 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 2176 __func__, cmd)); 2177 2178 QLNX_LOCK(ha); 2179 2180 if (ifp->if_flags & IFF_UP) { 2181 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2182 if ((ifp->if_flags ^ ha->if_flags) & 2183 IFF_PROMISC) { 2184 ret = qlnx_set_promisc(ha); 2185 } else if ((ifp->if_flags ^ ha->if_flags) & 2186 IFF_ALLMULTI) { 2187 ret = qlnx_set_allmulti(ha); 2188 } 2189 } else { 2190 ha->max_frame_size = ifp->if_mtu + 2191 ETHER_HDR_LEN + ETHER_CRC_LEN; 2192 qlnx_init_locked(ha); 2193 } 2194 } else { 2195 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2196 qlnx_stop(ha); 2197 ha->if_flags = ifp->if_flags; 2198 } 2199 2200 QLNX_UNLOCK(ha); 2201 break; 2202 2203 case SIOCADDMULTI: 2204 QL_DPRINT4(ha, (ha->pci_dev, 2205 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 2206 2207 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2208 if (qlnx_set_multi(ha, 1)) 2209 ret = EINVAL; 2210 } 2211 break; 2212 2213 case SIOCDELMULTI: 2214 QL_DPRINT4(ha, (ha->pci_dev, 2215 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 2216 2217 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2218 if (qlnx_set_multi(ha, 0)) 2219 ret = EINVAL; 2220 } 2221 break; 2222 2223 case SIOCSIFMEDIA: 2224 case SIOCGIFMEDIA: 2225 QL_DPRINT4(ha, (ha->pci_dev, 2226 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 2227 __func__, cmd)); 2228 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2229 break; 2230 2231 case SIOCSIFCAP: 2232 2233 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2234 2235 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 2236 __func__, cmd)); 2237 2238 if (mask & IFCAP_HWCSUM) 2239 ifp->if_capenable ^= IFCAP_HWCSUM; 2240 if (mask & IFCAP_TSO4) 2241 ifp->if_capenable ^= IFCAP_TSO4; 2242 if (mask & IFCAP_TSO6) 2243 ifp->if_capenable ^= IFCAP_TSO6; 2244 if (mask & IFCAP_VLAN_HWTAGGING) 2245 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2246 if (mask & IFCAP_VLAN_HWTSO) 2247 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2248 if (mask & IFCAP_LRO) 2249 ifp->if_capenable ^= IFCAP_LRO; 2250 2251 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 2252 qlnx_init(ha); 2253 2254 VLAN_CAPABILITIES(ifp); 2255 break; 2256 2257 #if (__FreeBSD_version >= 1100101) 2258 2259 case SIOCGI2C: 2260 { 2261 struct ifi2creq i2c; 2262 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2263 struct ecore_ptt *p_ptt; 2264 2265 ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2266 2267 if (ret) 2268 break; 2269 2270 if ((i2c.len > sizeof (i2c.data)) || 2271 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2272 ret = EINVAL; 2273 break; 2274 } 2275 2276 p_ptt = ecore_ptt_acquire(p_hwfn); 2277 2278 if (!p_ptt) { 2279 QL_DPRINT1(ha, (ha->pci_dev, "%s :" 2280 " ecore_ptt_acquire failed\n", __func__)); 2281 ret 
= -1; 2282 break; 2283 } 2284 2285 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2286 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2287 i2c.len, &i2c.data[0]); 2288 2289 ecore_ptt_release(p_hwfn, p_ptt); 2290 2291 if (ret) { 2292 ret = -1; 2293 break; 2294 } 2295 2296 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2297 2298 QL_DPRINT8(ha, (ha->pci_dev, "SIOCGI2C copyout ret = %d" 2299 " len = %d addr = 0x%02x offset = 0x%04x" 2300 " data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" 2301 " 0x%02x 0x%02x 0x%02x\n", 2302 ret, i2c.len, i2c.dev_addr, i2c.offset, 2303 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2304 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7])); 2305 break; 2306 } 2307 #endif /* #if (__FreeBSD_version >= 1100101) */ 2308 2309 default: 2310 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", 2311 __func__, cmd)); 2312 ret = ether_ioctl(ifp, cmd, data); 2313 break; 2314 } 2315 2316 return (ret); 2317 } 2318 2319 static int 2320 qlnx_media_change(struct ifnet *ifp) 2321 { 2322 qlnx_host_t *ha; 2323 struct ifmedia *ifm; 2324 int ret = 0; 2325 2326 ha = (qlnx_host_t *)ifp->if_softc; 2327 2328 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2329 2330 ifm = &ha->media; 2331 2332 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2333 ret = EINVAL; 2334 2335 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2336 2337 return (ret); 2338 } 2339 2340 static void 2341 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2342 { 2343 qlnx_host_t *ha; 2344 2345 ha = (qlnx_host_t *)ifp->if_softc; 2346 2347 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2348 2349 ifmr->ifm_status = IFM_AVALID; 2350 ifmr->ifm_active = IFM_ETHER; 2351 2352 if (ha->link_up) { 2353 ifmr->ifm_status |= IFM_ACTIVE; 2354 ifmr->ifm_active |= 2355 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2356 2357 if (ha->if_link.link_partner_caps & 2358 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2359 ifmr->ifm_active |= 2360 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2361 } 2362 2363 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__, 2364 (ha->link_up ? 
"link_up" : "link_down"))); 2365 2366 return; 2367 } 2368 2369 2370 static void 2371 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2372 struct qlnx_tx_queue *txq) 2373 { 2374 u16 idx; 2375 struct mbuf *mp; 2376 bus_dmamap_t map; 2377 int i; 2378 struct eth_tx_bd *tx_data_bd; 2379 struct eth_tx_1st_bd *first_bd; 2380 int nbds = 0; 2381 2382 idx = txq->sw_tx_cons; 2383 mp = txq->sw_tx_ring[idx].mp; 2384 map = txq->sw_tx_ring[idx].map; 2385 2386 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2387 2388 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2389 2390 QL_DPRINT1(ha, (ha->pci_dev, "%s: (mp == NULL) " 2391 " tx_idx = 0x%x" 2392 " ecore_prod_idx = 0x%x" 2393 " ecore_cons_idx = 0x%x" 2394 " hw_bd_cons = 0x%x" 2395 " txq_db_last = 0x%x" 2396 " elem_left = 0x%x\n", 2397 __func__, 2398 fp->rss_id, 2399 ecore_chain_get_prod_idx(&txq->tx_pbl), 2400 ecore_chain_get_cons_idx(&txq->tx_pbl), 2401 le16toh(*txq->hw_cons_ptr), 2402 txq->tx_db.raw, 2403 ecore_chain_get_elem_left(&txq->tx_pbl))); 2404 2405 fp->err_tx_free_pkt_null++; 2406 2407 //DEBUG 2408 qlnx_trigger_dump(ha); 2409 2410 return; 2411 } else { 2412 2413 QLNX_INC_OPACKETS((ha->ifp)); 2414 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2415 2416 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2417 bus_dmamap_unload(ha->tx_tag, map); 2418 2419 fp->tx_pkts_freed++; 2420 fp->tx_pkts_completed++; 2421 2422 m_freem(mp); 2423 } 2424 2425 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2426 nbds = first_bd->data.nbds; 2427 2428 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2429 2430 for (i = 1; i < nbds; i++) { 2431 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 2432 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2433 } 2434 txq->sw_tx_ring[idx].flags = 0; 2435 txq->sw_tx_ring[idx].mp = NULL; 2436 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2437 2438 return; 2439 } 2440 2441 static void 2442 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2443 struct qlnx_tx_queue *txq) 2444 { 2445 u16 hw_bd_cons; 2446 u16 ecore_cons_idx; 2447 uint16_t diff; 2448 2449 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2450 2451 while (hw_bd_cons != 2452 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2453 2454 if (hw_bd_cons < ecore_cons_idx) { 2455 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2456 } else { 2457 diff = hw_bd_cons - ecore_cons_idx; 2458 } 2459 if ((diff > TX_RING_SIZE) || 2460 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2461 2462 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2463 2464 QL_DPRINT1(ha, (ha->pci_dev, "%s: (diff = 0x%x) " 2465 " tx_idx = 0x%x" 2466 " ecore_prod_idx = 0x%x" 2467 " ecore_cons_idx = 0x%x" 2468 " hw_bd_cons = 0x%x" 2469 " txq_db_last = 0x%x" 2470 " elem_left = 0x%x\n", 2471 __func__, diff, 2472 fp->rss_id, 2473 ecore_chain_get_prod_idx(&txq->tx_pbl), 2474 ecore_chain_get_cons_idx(&txq->tx_pbl), 2475 le16toh(*txq->hw_cons_ptr), 2476 txq->tx_db.raw, 2477 ecore_chain_get_elem_left(&txq->tx_pbl))); 2478 2479 fp->err_tx_cons_idx_conflict++; 2480 2481 //DEBUG 2482 qlnx_trigger_dump(ha); 2483 } 2484 2485 qlnx_free_tx_pkt(ha, fp, txq); 2486 2487 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2488 } 2489 return; 2490 } 2491 2492 static int 2493 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 2494 { 2495 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 2496 struct qlnx_fastpath *fp; 2497 int rss_id = 0, ret = 0; 2498 2499 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2500 2501 #if __FreeBSD_version >= 1100000 2502 
if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 2503 #else 2504 if (mp->m_flags & M_FLOWID) 2505 #endif 2506 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 2507 ha->num_rss; 2508 2509 fp = &ha->fp_array[rss_id]; 2510 2511 if (fp->tx_br == NULL) { 2512 ret = EINVAL; 2513 goto qlnx_transmit_exit; 2514 } 2515 2516 if (mp != NULL) { 2517 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2518 } 2519 2520 if (fp->fp_taskqueue != NULL) 2521 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 2522 2523 ret = 0; 2524 2525 qlnx_transmit_exit: 2526 2527 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 2528 return ret; 2529 } 2530 2531 static void 2532 qlnx_qflush(struct ifnet *ifp) 2533 { 2534 int rss_id; 2535 struct qlnx_fastpath *fp; 2536 struct mbuf *mp; 2537 qlnx_host_t *ha; 2538 2539 ha = (qlnx_host_t *)ifp->if_softc; 2540 2541 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2542 2543 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 2544 2545 fp = &ha->fp_array[rss_id]; 2546 2547 if (fp == NULL) 2548 continue; 2549 2550 if (fp->tx_br) { 2551 mtx_lock(&fp->tx_mtx); 2552 2553 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 2554 fp->tx_pkts_freed++; 2555 m_freem(mp); 2556 } 2557 mtx_unlock(&fp->tx_mtx); 2558 } 2559 } 2560 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2561 2562 return; 2563 } 2564 2565 static void 2566 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 2567 { 2568 struct ecore_dev *cdev; 2569 uint32_t offset; 2570 2571 cdev = &ha->cdev; 2572 2573 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells); 2574 2575 bus_write_4(ha->pci_dbells, offset, value); 2576 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 2577 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 2578 2579 return; 2580 } 2581 2582 static uint32_t 2583 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 2584 { 2585 struct ether_vlan_header *eh = NULL; 2586 struct ip *ip = NULL; 2587 struct ip6_hdr *ip6 = NULL; 2588 struct tcphdr *th = NULL; 2589 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 2590 uint16_t etype = 0; 2591 device_t dev; 2592 uint8_t buf[sizeof(struct ip6_hdr)]; 2593 2594 dev = ha->pci_dev; 2595 2596 eh = mtod(mp, struct ether_vlan_header *); 2597 2598 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2599 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2600 etype = ntohs(eh->evl_proto); 2601 } else { 2602 ehdrlen = ETHER_HDR_LEN; 2603 etype = ntohs(eh->evl_encap_proto); 2604 } 2605 2606 switch (etype) { 2607 2608 case ETHERTYPE_IP: 2609 ip = (struct ip *)(mp->m_data + ehdrlen); 2610 2611 ip_hlen = sizeof (struct ip); 2612 2613 if (mp->m_len < (ehdrlen + ip_hlen)) { 2614 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 2615 ip = (struct ip *)buf; 2616 } 2617 2618 th = (struct tcphdr *)(ip + 1); 2619 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2620 break; 2621 2622 case ETHERTYPE_IPV6: 2623 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2624 2625 ip_hlen = sizeof(struct ip6_hdr); 2626 2627 if (mp->m_len < (ehdrlen + ip_hlen)) { 2628 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 2629 buf); 2630 ip6 = (struct ip6_hdr *)buf; 2631 } 2632 th = (struct tcphdr *)(ip6 + 1); 2633 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2634 break; 2635 2636 default: 2637 break; 2638 } 2639 2640 return (offset); 2641 } 2642 2643 static __inline int 2644 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 2645 uint32_t offset) 2646 { 2647 int i; 2648 uint32_t sum, nbds_in_hdr = 1; 2649 
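	/*
	 * LSO window check (descriptive comment): nbds_in_hdr counts how many
	 * DMA segments the L2/L3/L4 headers (offset bytes) span. The segment
	 * list is then scanned in windows of
	 * (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr) segments; if any window
	 * sums to less than ETH_TX_LSO_WINDOW_MIN_LEN bytes the function
	 * returns -1 and qlnx_send() falls back to m_defrag().
	 */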
bus_dma_segment_t *t_segs = segs; 2650 2651 /* count the number of segments spanned by TCP header */ 2652 2653 i = 0; 2654 while ((i < nsegs) && (offset > t_segs->ds_len)) { 2655 nbds_in_hdr++; 2656 offset = offset - t_segs->ds_len; 2657 t_segs++; 2658 i++; 2659 } 2660 2661 while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) { 2662 2663 sum = 0; 2664 2665 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){ 2666 sum += segs->ds_len; 2667 segs++; 2668 } 2669 2670 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 2671 fp->tx_lso_wnd_min_len++; 2672 return (-1); 2673 } 2674 2675 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO; 2676 } 2677 2678 return (0); 2679 } 2680 2681 static int 2682 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 2683 { 2684 bus_dma_segment_t *segs; 2685 bus_dmamap_t map = 0; 2686 uint32_t nsegs = 0; 2687 int ret = -1; 2688 struct mbuf *m_head = *m_headp; 2689 uint16_t idx = 0; 2690 uint16_t elem_left; 2691 2692 uint8_t nbd = 0; 2693 struct qlnx_tx_queue *txq; 2694 2695 struct eth_tx_1st_bd *first_bd; 2696 struct eth_tx_2nd_bd *second_bd; 2697 struct eth_tx_3rd_bd *third_bd; 2698 struct eth_tx_bd *tx_data_bd; 2699 2700 int seg_idx = 0; 2701 uint32_t nbds_in_hdr = 0; 2702 uint32_t offset = 0; 2703 2704 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2705 2706 if (!ha->link_up) 2707 return (-1); 2708 2709 first_bd = NULL; 2710 second_bd = NULL; 2711 third_bd = NULL; 2712 tx_data_bd = NULL; 2713 2714 txq = fp->txq[0]; 2715 idx = txq->sw_tx_prod; 2716 2717 map = txq->sw_tx_ring[idx].map; 2718 segs = txq->segs; 2719 2720 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 2721 BUS_DMA_NOWAIT); 2722 2723 #ifdef QLNX_TRACE_TSO_PKT_LEN 2724 2725 if (!fp->tx_tso_min_pkt_len) { 2726 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2727 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2728 } else { 2729 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 2730 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2731 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 2732 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len; 2733 } 2734 2735 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */ 2736 2737 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 2738 offset = qlnx_tcp_offset(ha, m_head); 2739 2740 if ((ret == EFBIG) || 2741 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 2742 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 2743 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 2744 qlnx_tso_check(fp, segs, nsegs, offset))))) { 2745 2746 struct mbuf *m; 2747 2748 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 2749 m_head->m_pkthdr.len)); 2750 2751 fp->tx_defrag++; 2752 2753 m = m_defrag(m_head, M_NOWAIT); 2754 if (m == NULL) { 2755 fp->err_tx_defrag++; 2756 fp->tx_pkts_freed++; 2757 m_freem(m_head); 2758 *m_headp = NULL; 2759 QL_DPRINT1(ha, (ha->pci_dev, 2760 "%s: m_defrag() = NULL [%d]\n", 2761 __func__, ret)); 2762 return (ENOBUFS); 2763 } 2764 2765 m_head = m; 2766 *m_headp = m_head; 2767 2768 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 2769 segs, &nsegs, BUS_DMA_NOWAIT))) { 2770 2771 fp->err_tx_defrag_dmamap_load++; 2772 2773 QL_DPRINT1(ha, (ha->pci_dev, 2774 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 2775 __func__, ret, m_head->m_pkthdr.len)); 2776 2777 fp->tx_pkts_freed++; 2778 m_freem(m_head); 2779 *m_headp = NULL; 2780 2781 return (ret); 2782 } 2783 2784 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 2785 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 2786 2787 fp->err_tx_non_tso_max_seg++; 2788 2789 QL_DPRINT1(ha, (ha->pci_dev, 2790 "%s: (%d) nsegs too 
many for non-TSO[%d, %d]\n", 2791 __func__, ret, nsegs, m_head->m_pkthdr.len)); 2792 2793 fp->tx_pkts_freed++; 2794 m_freem(m_head); 2795 *m_headp = NULL; 2796 2797 return (ret); 2798 } 2799 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 2800 offset = qlnx_tcp_offset(ha, m_head); 2801 2802 } else if (ret) { 2803 2804 fp->err_tx_dmamap_load++; 2805 2806 QL_DPRINT1(ha, (ha->pci_dev, 2807 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 2808 __func__, ret, m_head->m_pkthdr.len)); 2809 2810 fp->tx_pkts_freed++; 2811 m_freem(m_head); 2812 *m_headp = NULL; 2813 return (ret); 2814 } 2815 2816 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 2817 2818 #ifdef QLNX_TRACE_TSO_PKT_LEN 2819 2820 if (nsegs < QLNX_FP_MAX_SEGS) 2821 fp->tx_pkts[(nsegs - 1)]++; 2822 else 2823 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 2824 2825 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */ 2826 2827 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 2828 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 2829 2830 QL_DPRINT1(ha, (ha->pci_dev, "%s: (%d, 0x%x) insuffient BDs" 2831 "in chain[%d] trying to free packets\n", 2832 __func__, nsegs, elem_left, fp->rss_id)); 2833 2834 fp->tx_nsegs_gt_elem_left++; 2835 2836 (void)qlnx_tx_int(ha, fp, txq); 2837 2838 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 2839 ecore_chain_get_elem_left(&txq->tx_pbl))) { 2840 2841 QL_DPRINT1(ha, (ha->pci_dev, 2842 "%s: (%d, 0x%x) insuffient BDs in chain[%d]\n", 2843 __func__, nsegs, elem_left, fp->rss_id)); 2844 2845 fp->err_tx_nsegs_gt_elem_left++; 2846 ha->storm_stats_enable = 1; 2847 return (ENOBUFS); 2848 } 2849 } 2850 2851 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 2852 2853 txq->sw_tx_ring[idx].mp = m_head; 2854 2855 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 2856 2857 memset(first_bd, 0, sizeof(*first_bd)); 2858 2859 first_bd->data.bd_flags.bitfields = 2860 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 2861 2862 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 2863 2864 nbd++; 2865 2866 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 2867 first_bd->data.bd_flags.bitfields |= 2868 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2869 } 2870 2871 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) { 2872 first_bd->data.bd_flags.bitfields |= 2873 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 2874 } 2875 2876 if (m_head->m_flags & M_VLANTAG) { 2877 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 2878 first_bd->data.bd_flags.bitfields |= 2879 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 2880 } 2881 2882 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2883 2884 first_bd->data.bd_flags.bitfields |= 2885 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 2886 first_bd->data.bd_flags.bitfields |= 2887 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2888 2889 nbds_in_hdr = 1; 2890 2891 if (offset == segs->ds_len) { 2892 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 2893 segs++; 2894 seg_idx++; 2895 2896 second_bd = (struct eth_tx_2nd_bd *) 2897 ecore_chain_produce(&txq->tx_pbl); 2898 memset(second_bd, 0, sizeof(*second_bd)); 2899 nbd++; 2900 2901 if (seg_idx < nsegs) { 2902 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 2903 (segs->ds_addr), (segs->ds_len)); 2904 segs++; 2905 seg_idx++; 2906 } 2907 2908 third_bd = (struct eth_tx_3rd_bd *) 2909 ecore_chain_produce(&txq->tx_pbl); 2910 memset(third_bd, 0, sizeof(*third_bd)); 2911 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 2912 third_bd->data.bitfields |= 2913 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 2914 nbd++; 2915 2916 if (seg_idx < nsegs) { 2917 
BD_SET_UNMAP_ADDR_LEN(third_bd, \ 2918 (segs->ds_addr), (segs->ds_len)); 2919 segs++; 2920 seg_idx++; 2921 } 2922 2923 for (; seg_idx < nsegs; seg_idx++) { 2924 tx_data_bd = (struct eth_tx_bd *) 2925 ecore_chain_produce(&txq->tx_pbl); 2926 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 2927 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 2928 segs->ds_addr,\ 2929 segs->ds_len); 2930 segs++; 2931 nbd++; 2932 } 2933 2934 } else if (offset < segs->ds_len) { 2935 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 2936 2937 second_bd = (struct eth_tx_2nd_bd *) 2938 ecore_chain_produce(&txq->tx_pbl); 2939 memset(second_bd, 0, sizeof(*second_bd)); 2940 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 2941 (segs->ds_addr + offset),\ 2942 (segs->ds_len - offset)); 2943 nbd++; 2944 segs++; 2945 2946 third_bd = (struct eth_tx_3rd_bd *) 2947 ecore_chain_produce(&txq->tx_pbl); 2948 memset(third_bd, 0, sizeof(*third_bd)); 2949 2950 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 2951 segs->ds_addr,\ 2952 segs->ds_len); 2953 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 2954 third_bd->data.bitfields |= 2955 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 2956 segs++; 2957 nbd++; 2958 2959 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 2960 tx_data_bd = (struct eth_tx_bd *) 2961 ecore_chain_produce(&txq->tx_pbl); 2962 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 2963 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 2964 segs->ds_addr,\ 2965 segs->ds_len); 2966 segs++; 2967 nbd++; 2968 } 2969 2970 } else { 2971 offset = offset - segs->ds_len; 2972 segs++; 2973 2974 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 2975 2976 if (offset) 2977 nbds_in_hdr++; 2978 2979 tx_data_bd = (struct eth_tx_bd *) 2980 ecore_chain_produce(&txq->tx_pbl); 2981 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 2982 2983 if (second_bd == NULL) { 2984 second_bd = (struct eth_tx_2nd_bd *) 2985 tx_data_bd; 2986 } else if (third_bd == NULL) { 2987 third_bd = (struct eth_tx_3rd_bd *) 2988 tx_data_bd; 2989 } 2990 2991 if (offset && (offset < segs->ds_len)) { 2992 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 2993 segs->ds_addr, offset); 2994 2995 tx_data_bd = (struct eth_tx_bd *) 2996 ecore_chain_produce(&txq->tx_pbl); 2997 2998 memset(tx_data_bd, 0, 2999 sizeof(*tx_data_bd)); 3000 3001 if (second_bd == NULL) { 3002 second_bd = 3003 (struct eth_tx_2nd_bd *)tx_data_bd; 3004 } else if (third_bd == NULL) { 3005 third_bd = 3006 (struct eth_tx_3rd_bd *)tx_data_bd; 3007 } 3008 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3009 (segs->ds_addr + offset), \ 3010 (segs->ds_len - offset)); 3011 nbd++; 3012 offset = 0; 3013 } else { 3014 if (offset) 3015 offset = offset - segs->ds_len; 3016 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3017 segs->ds_addr, segs->ds_len); 3018 } 3019 segs++; 3020 nbd++; 3021 } 3022 3023 if (third_bd == NULL) { 3024 third_bd = (struct eth_tx_3rd_bd *) 3025 ecore_chain_produce(&txq->tx_pbl); 3026 memset(third_bd, 0, sizeof(*third_bd)); 3027 } 3028 3029 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3030 third_bd->data.bitfields |= 3031 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3032 } 3033 } else { 3034 segs++; 3035 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3036 tx_data_bd = (struct eth_tx_bd *) 3037 ecore_chain_produce(&txq->tx_pbl); 3038 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3039 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3040 segs->ds_len); 3041 segs++; 3042 nbd++; 3043 } 3044 first_bd->data.bitfields = 3045 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3046 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3047 first_bd->data.bitfields = 3048 
htole16(first_bd->data.bitfields); 3049 } 3050 3051 3052 first_bd->data.nbds = nbd; 3053 3054 #ifdef QLNX_TRACE_TSO_PKT_LEN 3055 3056 if (fp->tx_tso_max_nsegs < nsegs) 3057 fp->tx_tso_max_nsegs = nsegs; 3058 3059 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3060 fp->tx_tso_min_nsegs = nsegs; 3061 3062 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */ 3063 3064 txq->sw_tx_ring[idx].nsegs = nsegs; 3065 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3066 3067 txq->tx_db.data.bd_prod = 3068 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3069 3070 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3071 3072 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 3073 return (0); 3074 } 3075 3076 static void 3077 qlnx_stop(qlnx_host_t *ha) 3078 { 3079 struct ifnet *ifp = ha->ifp; 3080 device_t dev; 3081 int i; 3082 3083 dev = ha->pci_dev; 3084 3085 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3086 3087 /* 3088 * We simply lock and unlock each fp->tx_mtx to 3089 * propagate the if_drv_flags 3090 * state to each tx thread 3091 */ 3092 if (ha->state == QLNX_STATE_OPEN) { 3093 for (i = 0; i < ha->num_rss; i++) { 3094 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3095 3096 mtx_lock(&fp->tx_mtx); 3097 mtx_unlock(&fp->tx_mtx); 3098 3099 if (fp->fp_taskqueue != NULL) 3100 taskqueue_enqueue(fp->fp_taskqueue, 3101 &fp->fp_task); 3102 } 3103 } 3104 3105 qlnx_unload(ha); 3106 3107 return; 3108 } 3109 3110 static int 3111 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3112 { 3113 return(TX_RING_SIZE - 1); 3114 } 3115 3116 uint8_t * 3117 qlnx_get_mac_addr(qlnx_host_t *ha) 3118 { 3119 struct ecore_hwfn *p_hwfn; 3120 3121 p_hwfn = &ha->cdev.hwfns[0]; 3122 return (p_hwfn->hw_info.hw_mac_addr); 3123 } 3124 3125 static uint32_t 3126 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3127 { 3128 uint32_t ifm_type = 0; 3129 3130 switch (if_link->media_type) { 3131 3132 case MEDIA_MODULE_FIBER: 3133 case MEDIA_UNSPECIFIED: 3134 if (if_link->speed == (100 * 1000)) 3135 ifm_type = QLNX_IFM_100G_SR4; 3136 else if (if_link->speed == (40 * 1000)) 3137 ifm_type = IFM_40G_SR4; 3138 else if (if_link->speed == (25 * 1000)) 3139 ifm_type = QLNX_IFM_25G_SR; 3140 break; 3141 3142 case MEDIA_DA_TWINAX: 3143 if (if_link->speed == (100 * 1000)) 3144 ifm_type = QLNX_IFM_100G_CR4; 3145 else if (if_link->speed == (40 * 1000)) 3146 ifm_type = IFM_40G_CR4; 3147 else if (if_link->speed == (25 * 1000)) 3148 ifm_type = QLNX_IFM_25G_CR; 3149 break; 3150 3151 default : 3152 ifm_type = IFM_UNKNOWN; 3153 break; 3154 } 3155 return (ifm_type); 3156 } 3157 3158 3159 3160 /***************************************************************************** 3161 * Interrupt Service Functions 3162 *****************************************************************************/ 3163 3164 static int 3165 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3166 struct mbuf *mp_head, uint16_t len) 3167 { 3168 struct mbuf *mp, *mpf, *mpl; 3169 struct sw_rx_data *sw_rx_data; 3170 struct qlnx_rx_queue *rxq; 3171 uint16_t len_in_buffer; 3172 3173 rxq = fp->rxq; 3174 mpf = mpl = mp = NULL; 3175 3176 while (len) { 3177 3178 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3179 3180 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3181 mp = sw_rx_data->data; 3182 3183 if (mp == NULL) { 3184 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n", 3185 __func__)); 3186 fp->err_rx_mp_null++; 3187 rxq->sw_rx_cons = 3188 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3189 3190 if (mpf != NULL) 3191 
m_freem(mpf); 3192 3193 return (-1); 3194 } 3195 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3196 BUS_DMASYNC_POSTREAD); 3197 3198 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3199 3200 QL_DPRINT1(ha, (ha->pci_dev, 3201 "%s: New buffer allocation failed, dropping" 3202 " incoming packet and reusing its buffer\n", 3203 __func__)); 3204 3205 qlnx_reuse_rx_data(rxq); 3206 fp->err_rx_alloc_errors++; 3207 3208 if (mpf != NULL) 3209 m_freem(mpf); 3210 3211 return (-1); 3212 } 3213 ecore_chain_consume(&rxq->rx_bd_ring); 3214 3215 if (len > rxq->rx_buf_size) 3216 len_in_buffer = rxq->rx_buf_size; 3217 else 3218 len_in_buffer = len; 3219 3220 len = len - len_in_buffer; 3221 3222 mp->m_flags &= ~M_PKTHDR; 3223 mp->m_next = NULL; 3224 mp->m_len = len_in_buffer; 3225 3226 if (mpf == NULL) 3227 mpf = mpl = mp; 3228 else { 3229 mpl->m_next = mp; 3230 mpl = mp; 3231 } 3232 } 3233 3234 if (mpf != NULL) 3235 mp_head->m_next = mpf; 3236 3237 return (0); 3238 } 3239 3240 static void 3241 qlnx_tpa_start(qlnx_host_t *ha, 3242 struct qlnx_fastpath *fp, 3243 struct qlnx_rx_queue *rxq, 3244 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3245 { 3246 uint32_t agg_index; 3247 struct ifnet *ifp = ha->ifp; 3248 struct mbuf *mp; 3249 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3250 struct sw_rx_data *sw_rx_data; 3251 dma_addr_t addr; 3252 bus_dmamap_t map; 3253 struct eth_rx_bd *rx_bd; 3254 int i; 3255 device_t dev; 3256 #if __FreeBSD_version >= 1100000 3257 uint8_t hash_type; 3258 #endif /* #if __FreeBSD_version >= 1100000 */ 3259 3260 dev = ha->pci_dev; 3261 agg_index = cqe->tpa_agg_index; 3262 3263 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3264 "\t type = 0x%x\n" 3265 "\t bitfields = 0x%x\n" 3266 "\t seg_len = 0x%x\n" 3267 "\t pars_flags = 0x%x\n" 3268 "\t vlan_tag = 0x%x\n" 3269 "\t rss_hash = 0x%x\n" 3270 "\t len_on_first_bd = 0x%x\n" 3271 "\t placement_offset = 0x%x\n" 3272 "\t tpa_agg_index = 0x%x\n" 3273 "\t header_len = 0x%x\n" 3274 "\t ext_bd_len_list[0] = 0x%x\n" 3275 "\t ext_bd_len_list[1] = 0x%x\n" 3276 "\t ext_bd_len_list[2] = 0x%x\n" 3277 "\t ext_bd_len_list[3] = 0x%x\n" 3278 "\t ext_bd_len_list[4] = 0x%x\n", 3279 __func__, fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3280 cqe->pars_flags.flags, cqe->vlan_tag, 3281 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3282 cqe->tpa_agg_index, cqe->header_len, 3283 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3284 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3285 cqe->ext_bd_len_list[4])); 3286 3287 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3288 fp->err_rx_tpa_invalid_agg_num++; 3289 return; 3290 } 3291 3292 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3293 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3294 mp = sw_rx_data->data; 3295 3296 QL_DPRINT7(ha, (dev, "%s[%d]: mp = %p \n ", __func__, fp->rss_id, mp)); 3297 3298 if (mp == NULL) { 3299 QL_DPRINT7(ha, (dev, "%s[%d]: mp = NULL\n", __func__, 3300 fp->rss_id)); 3301 fp->err_rx_mp_null++; 3302 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3303 3304 return; 3305 } 3306 3307 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3308 3309 QL_DPRINT7(ha, (dev, "%s[%d]: CQE in CONS = %u has error," 3310 " flags = %x, dropping incoming packet\n", __func__, 3311 fp->rss_id, rxq->sw_rx_cons, 3312 le16toh(cqe->pars_flags.flags))); 3313 3314 fp->err_rx_hw_errors++; 3315 3316 qlnx_reuse_rx_data(rxq); 3317 3318 QLNX_INC_IERRORS(ifp); 3319 3320 return; 3321 } 3322 3323 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3324 3325 QL_DPRINT7(ha, (dev, "%s[%d]: 
New buffer allocation failed," 3326 " dropping incoming packet and reusing its buffer\n", 3327 __func__, fp->rss_id)); 3328 3329 fp->err_rx_alloc_errors++; 3330 QLNX_INC_IQDROPS(ifp); 3331 3332 /* 3333 * Load the tpa mbuf into the rx ring and save the 3334 * posted mbuf 3335 */ 3336 3337 map = sw_rx_data->map; 3338 addr = sw_rx_data->dma_addr; 3339 3340 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 3341 3342 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 3343 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 3344 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 3345 3346 rxq->tpa_info[agg_index].rx_buf.data = mp; 3347 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 3348 rxq->tpa_info[agg_index].rx_buf.map = map; 3349 3350 rx_bd = (struct eth_rx_bd *) 3351 ecore_chain_produce(&rxq->rx_bd_ring); 3352 3353 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 3354 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 3355 3356 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3357 BUS_DMASYNC_PREREAD); 3358 3359 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 3360 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3361 3362 ecore_chain_consume(&rxq->rx_bd_ring); 3363 3364 /* Now reuse any buffers posted in ext_bd_len_list */ 3365 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3366 3367 if (cqe->ext_bd_len_list[i] == 0) 3368 break; 3369 3370 qlnx_reuse_rx_data(rxq); 3371 } 3372 3373 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3374 return; 3375 } 3376 3377 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3378 3379 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state," 3380 " dropping incoming packet and reusing its buffer\n", 3381 __func__, fp->rss_id)); 3382 3383 QLNX_INC_IQDROPS(ifp); 3384 3385 /* if we already have mbuf head in aggregation free it */ 3386 if (rxq->tpa_info[agg_index].mpf) { 3387 m_freem(rxq->tpa_info[agg_index].mpf); 3388 rxq->tpa_info[agg_index].mpl = NULL; 3389 } 3390 rxq->tpa_info[agg_index].mpf = mp; 3391 rxq->tpa_info[agg_index].mpl = NULL; 3392 3393 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3394 ecore_chain_consume(&rxq->rx_bd_ring); 3395 3396 /* Now reuse any buffers posted in ext_bd_len_list */ 3397 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3398 3399 if (cqe->ext_bd_len_list[i] == 0) 3400 break; 3401 3402 qlnx_reuse_rx_data(rxq); 3403 } 3404 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3405 3406 return; 3407 } 3408 3409 /* 3410 * first process the ext_bd_len_list 3411 * if this fails then we simply drop the packet 3412 */ 3413 ecore_chain_consume(&rxq->rx_bd_ring); 3414 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3415 3416 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3417 3418 QL_DPRINT7(ha, (dev, "%s[%d]: 4\n ", __func__, fp->rss_id)); 3419 3420 if (cqe->ext_bd_len_list[i] == 0) 3421 break; 3422 3423 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3424 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3425 BUS_DMASYNC_POSTREAD); 3426 3427 mpc = sw_rx_data->data; 3428 3429 if (mpc == NULL) { 3430 QL_DPRINT7(ha, (ha->pci_dev, "%s[%d]: mpc = NULL\n", 3431 __func__, fp->rss_id)); 3432 fp->err_rx_mp_null++; 3433 if (mpf != NULL) 3434 m_freem(mpf); 3435 mpf = mpl = NULL; 3436 rxq->tpa_info[agg_index].agg_state = 3437 QLNX_AGG_STATE_ERROR; 3438 ecore_chain_consume(&rxq->rx_bd_ring); 3439 rxq->sw_rx_cons = 3440 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3441 continue; 3442 } 3443 3444 if 
(qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3445 QL_DPRINT7(ha, (dev, 3446 "%s[%d]: New buffer allocation failed, dropping" 3447 " incoming packet and reusing its buffer\n", 3448 __func__, fp->rss_id)); 3449 3450 qlnx_reuse_rx_data(rxq); 3451 3452 if (mpf != NULL) 3453 m_freem(mpf); 3454 mpf = mpl = NULL; 3455 3456 rxq->tpa_info[agg_index].agg_state = 3457 QLNX_AGG_STATE_ERROR; 3458 3459 ecore_chain_consume(&rxq->rx_bd_ring); 3460 rxq->sw_rx_cons = 3461 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3462 3463 continue; 3464 } 3465 3466 mpc->m_flags &= ~M_PKTHDR; 3467 mpc->m_next = NULL; 3468 mpc->m_len = cqe->ext_bd_len_list[i]; 3469 3470 3471 if (mpf == NULL) { 3472 mpf = mpl = mpc; 3473 } else { 3474 mpl->m_len = ha->rx_buf_size; 3475 mpl->m_next = mpc; 3476 mpl = mpc; 3477 } 3478 3479 ecore_chain_consume(&rxq->rx_bd_ring); 3480 rxq->sw_rx_cons = 3481 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3482 } 3483 3484 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3485 3486 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state," 3487 " dropping incoming packet and reusing its buffer\n", 3488 __func__, fp->rss_id)); 3489 3490 QLNX_INC_IQDROPS(ifp); 3491 3492 rxq->tpa_info[agg_index].mpf = mp; 3493 rxq->tpa_info[agg_index].mpl = NULL; 3494 3495 return; 3496 } 3497 3498 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 3499 3500 if (mpf != NULL) { 3501 mp->m_len = ha->rx_buf_size; 3502 mp->m_next = mpf; 3503 rxq->tpa_info[agg_index].mpf = mp; 3504 rxq->tpa_info[agg_index].mpl = mpl; 3505 } else { 3506 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 3507 rxq->tpa_info[agg_index].mpf = mp; 3508 rxq->tpa_info[agg_index].mpl = mp; 3509 mp->m_next = NULL; 3510 } 3511 3512 mp->m_flags |= M_PKTHDR; 3513 3514 /* assign packet to this interface interface */ 3515 mp->m_pkthdr.rcvif = ifp; 3516 3517 /* assume no hardware checksum has complated */ 3518 mp->m_pkthdr.csum_flags = 0; 3519 3520 //mp->m_pkthdr.flowid = fp->rss_id; 3521 mp->m_pkthdr.flowid = cqe->rss_hash; 3522 3523 #if __FreeBSD_version >= 1100000 3524 3525 hash_type = cqe->bitfields & 3526 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 3527 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 3528 3529 switch (hash_type) { 3530 3531 case RSS_HASH_TYPE_IPV4: 3532 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 3533 break; 3534 3535 case RSS_HASH_TYPE_TCP_IPV4: 3536 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 3537 break; 3538 3539 case RSS_HASH_TYPE_IPV6: 3540 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 3541 break; 3542 3543 case RSS_HASH_TYPE_TCP_IPV6: 3544 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 3545 break; 3546 3547 default: 3548 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 3549 break; 3550 } 3551 3552 #else 3553 mp->m_flags |= M_FLOWID; 3554 #endif 3555 3556 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 3557 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 3558 3559 mp->m_pkthdr.csum_data = 0xFFFF; 3560 3561 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 3562 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 3563 mp->m_flags |= M_VLANTAG; 3564 } 3565 3566 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 3567 3568 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n" "\tagg_state = %d\n" 3569 "\t mpf = %p mpl = %p\n", __func__, fp->rss_id, 3570 rxq->tpa_info[agg_index].agg_state, 3571 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl)); 3572 3573 return; 3574 } 3575 3576 static void 3577 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3578 struct qlnx_rx_queue *rxq, 3579 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 
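/*
 * TPA continuation (descriptive comment): for each non-zero entry in
 * cqe->len_list, consume the next posted receive buffer, replenish its slot
 * on the rx_bd_ring and append the mbuf to the aggregation opened by
 * qlnx_tpa_start(). A NULL mbuf or a failed replacement allocation marks the
 * aggregation QLNX_AGG_STATE_ERROR and frees any partially built chain.
 */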
3580 { 3581 struct sw_rx_data *sw_rx_data; 3582 int i; 3583 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3584 struct mbuf *mp; 3585 uint32_t agg_index; 3586 device_t dev; 3587 3588 dev = ha->pci_dev; 3589 3590 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3591 "\t type = 0x%x\n" 3592 "\t tpa_agg_index = 0x%x\n" 3593 "\t len_list[0] = 0x%x\n" 3594 "\t len_list[1] = 0x%x\n" 3595 "\t len_list[2] = 0x%x\n" 3596 "\t len_list[3] = 0x%x\n" 3597 "\t len_list[4] = 0x%x\n" 3598 "\t len_list[5] = 0x%x\n", 3599 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index, 3600 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3601 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5])); 3602 3603 agg_index = cqe->tpa_agg_index; 3604 3605 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3606 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id)); 3607 fp->err_rx_tpa_invalid_agg_num++; 3608 return; 3609 } 3610 3611 3612 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 3613 3614 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id)); 3615 3616 if (cqe->len_list[i] == 0) 3617 break; 3618 3619 if (rxq->tpa_info[agg_index].agg_state != 3620 QLNX_AGG_STATE_START) { 3621 qlnx_reuse_rx_data(rxq); 3622 continue; 3623 } 3624 3625 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3626 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3627 BUS_DMASYNC_POSTREAD); 3628 3629 mpc = sw_rx_data->data; 3630 3631 if (mpc == NULL) { 3632 3633 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n", 3634 __func__, fp->rss_id)); 3635 3636 fp->err_rx_mp_null++; 3637 if (mpf != NULL) 3638 m_freem(mpf); 3639 mpf = mpl = NULL; 3640 rxq->tpa_info[agg_index].agg_state = 3641 QLNX_AGG_STATE_ERROR; 3642 ecore_chain_consume(&rxq->rx_bd_ring); 3643 rxq->sw_rx_cons = 3644 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3645 continue; 3646 } 3647 3648 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3649 3650 QL_DPRINT7(ha, (dev, 3651 "%s[%d]: New buffer allocation failed, dropping" 3652 " incoming packet and reusing its buffer\n", 3653 __func__, fp->rss_id)); 3654 3655 qlnx_reuse_rx_data(rxq); 3656 3657 if (mpf != NULL) 3658 m_freem(mpf); 3659 mpf = mpl = NULL; 3660 3661 rxq->tpa_info[agg_index].agg_state = 3662 QLNX_AGG_STATE_ERROR; 3663 3664 ecore_chain_consume(&rxq->rx_bd_ring); 3665 rxq->sw_rx_cons = 3666 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3667 3668 continue; 3669 } 3670 3671 mpc->m_flags &= ~M_PKTHDR; 3672 mpc->m_next = NULL; 3673 mpc->m_len = cqe->len_list[i]; 3674 3675 3676 if (mpf == NULL) { 3677 mpf = mpl = mpc; 3678 } else { 3679 mpl->m_len = ha->rx_buf_size; 3680 mpl->m_next = mpc; 3681 mpl = mpc; 3682 } 3683 3684 ecore_chain_consume(&rxq->rx_bd_ring); 3685 rxq->sw_rx_cons = 3686 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3687 } 3688 3689 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n" "\tmpf = %p mpl = %p\n", 3690 __func__, fp->rss_id, mpf, mpl)); 3691 3692 if (mpf != NULL) { 3693 mp = rxq->tpa_info[agg_index].mpl; 3694 mp->m_len = ha->rx_buf_size; 3695 mp->m_next = mpf; 3696 rxq->tpa_info[agg_index].mpl = mpl; 3697 } 3698 3699 return; 3700 } 3701 3702 static int 3703 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3704 struct qlnx_rx_queue *rxq, 3705 struct eth_fast_path_rx_tpa_end_cqe *cqe) 3706 { 3707 struct sw_rx_data *sw_rx_data; 3708 int i; 3709 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3710 struct mbuf *mp; 3711 uint32_t agg_index; 3712 uint32_t len = 0; 3713 struct ifnet *ifp = ha->ifp; 3714 device_t dev; 3715 3716 dev = ha->pci_dev; 3717 3718 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3719 "\t type = 0x%x\n" 3720 "\t tpa_agg_index 
= 0x%x\n" 3721 "\t total_packet_len = 0x%x\n" 3722 "\t num_of_bds = 0x%x\n" 3723 "\t end_reason = 0x%x\n" 3724 "\t num_of_coalesced_segs = 0x%x\n" 3725 "\t ts_delta = 0x%x\n" 3726 "\t len_list[0] = 0x%x\n" 3727 "\t len_list[1] = 0x%x\n" 3728 "\t len_list[2] = 0x%x\n" 3729 "\t len_list[3] = 0x%x\n", 3730 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index, 3731 cqe->total_packet_len, cqe->num_of_bds, 3732 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 3733 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3734 cqe->len_list[3])); 3735 3736 agg_index = cqe->tpa_agg_index; 3737 3738 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3739 3740 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id)); 3741 3742 fp->err_rx_tpa_invalid_agg_num++; 3743 return (0); 3744 } 3745 3746 3747 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 3748 3749 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id)); 3750 3751 if (cqe->len_list[i] == 0) 3752 break; 3753 3754 if (rxq->tpa_info[agg_index].agg_state != 3755 QLNX_AGG_STATE_START) { 3756 3757 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n ", __func__, 3758 fp->rss_id)); 3759 3760 qlnx_reuse_rx_data(rxq); 3761 continue; 3762 } 3763 3764 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3765 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3766 BUS_DMASYNC_POSTREAD); 3767 3768 mpc = sw_rx_data->data; 3769 3770 if (mpc == NULL) { 3771 3772 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n", 3773 __func__, fp->rss_id)); 3774 3775 fp->err_rx_mp_null++; 3776 if (mpf != NULL) 3777 m_freem(mpf); 3778 mpf = mpl = NULL; 3779 rxq->tpa_info[agg_index].agg_state = 3780 QLNX_AGG_STATE_ERROR; 3781 ecore_chain_consume(&rxq->rx_bd_ring); 3782 rxq->sw_rx_cons = 3783 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3784 continue; 3785 } 3786 3787 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3788 QL_DPRINT7(ha, (dev, 3789 "%s[%d]: New buffer allocation failed, dropping" 3790 " incoming packet and reusing its buffer\n", 3791 __func__, fp->rss_id)); 3792 3793 qlnx_reuse_rx_data(rxq); 3794 3795 if (mpf != NULL) 3796 m_freem(mpf); 3797 mpf = mpl = NULL; 3798 3799 rxq->tpa_info[agg_index].agg_state = 3800 QLNX_AGG_STATE_ERROR; 3801 3802 ecore_chain_consume(&rxq->rx_bd_ring); 3803 rxq->sw_rx_cons = 3804 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3805 3806 continue; 3807 } 3808 3809 mpc->m_flags &= ~M_PKTHDR; 3810 mpc->m_next = NULL; 3811 mpc->m_len = cqe->len_list[i]; 3812 3813 3814 if (mpf == NULL) { 3815 mpf = mpl = mpc; 3816 } else { 3817 mpl->m_len = ha->rx_buf_size; 3818 mpl->m_next = mpc; 3819 mpl = mpc; 3820 } 3821 3822 ecore_chain_consume(&rxq->rx_bd_ring); 3823 rxq->sw_rx_cons = 3824 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3825 } 3826 3827 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n ", __func__, fp->rss_id)); 3828 3829 if (mpf != NULL) { 3830 3831 QL_DPRINT7(ha, (dev, "%s[%d]: 6\n ", __func__, fp->rss_id)); 3832 3833 mp = rxq->tpa_info[agg_index].mpl; 3834 mp->m_len = ha->rx_buf_size; 3835 mp->m_next = mpf; 3836 } 3837 3838 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 3839 3840 QL_DPRINT7(ha, (dev, "%s[%d]: 7\n ", __func__, fp->rss_id)); 3841 3842 if (rxq->tpa_info[agg_index].mpf != NULL) 3843 m_freem(rxq->tpa_info[agg_index].mpf); 3844 rxq->tpa_info[agg_index].mpf = NULL; 3845 rxq->tpa_info[agg_index].mpl = NULL; 3846 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3847 return (0); 3848 } 3849 3850 mp = rxq->tpa_info[agg_index].mpf; 3851 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 3852 mp->m_pkthdr.len = cqe->total_packet_len; 3853 3854 if 
(mp->m_next == NULL) 3855 mp->m_len = mp->m_pkthdr.len; 3856 else { 3857 /* compute the total packet length */ 3858 mpf = mp; 3859 while (mpf != NULL) { 3860 len += mpf->m_len; 3861 mpf = mpf->m_next; 3862 } 3863 3864 if (cqe->total_packet_len > len) { 3865 mpl = rxq->tpa_info[agg_index].mpl; 3866 mpl->m_len += (cqe->total_packet_len - len); 3867 } 3868 } 3869 3870 QLNX_INC_IPACKETS(ifp); 3871 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 3872 3873 QL_DPRINT7(ha, (dev, "%s[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n " 3874 "m_len = 0x%x m_pkthdr_len = 0x%x\n", 3875 __func__, fp->rss_id, mp->m_pkthdr.csum_data, 3876 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len)); 3877 3878 (*ifp->if_input)(ifp, mp); 3879 3880 rxq->tpa_info[agg_index].mpf = NULL; 3881 rxq->tpa_info[agg_index].mpl = NULL; 3882 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3883 3884 return (cqe->num_of_coalesced_segs); 3885 } 3886 3887 static int 3888 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 3889 int lro_enable) 3890 { 3891 uint16_t hw_comp_cons, sw_comp_cons; 3892 int rx_pkt = 0; 3893 struct qlnx_rx_queue *rxq = fp->rxq; 3894 struct ifnet *ifp = ha->ifp; 3895 struct ecore_dev *cdev = &ha->cdev; 3896 struct ecore_hwfn *p_hwfn; 3897 3898 #ifdef QLNX_SOFT_LRO 3899 struct lro_ctrl *lro; 3900 3901 lro = &rxq->lro; 3902 #endif /* #ifdef QLNX_SOFT_LRO */ 3903 3904 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 3905 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 3906 3907 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 3908 3909 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 3910 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 3911 * read before it is written by FW, then FW writes CQE and SB, and then 3912 * the CPU reads the hw_comp_cons, it will use an old CQE. 
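 * Accordingly, hw_comp_cons is sampled once before the loop, sw_comp_cons is
 * re-read from the completion ring after each recycled CQE, and the loop
 * stops once the caller-supplied budget of packets has been consumed.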
3913 */ 3914 3915 /* Loop to complete all indicated BDs */ 3916 while (sw_comp_cons != hw_comp_cons) { 3917 union eth_rx_cqe *cqe; 3918 struct eth_fast_path_rx_reg_cqe *fp_cqe; 3919 struct sw_rx_data *sw_rx_data; 3920 register struct mbuf *mp; 3921 enum eth_rx_cqe_type cqe_type; 3922 uint16_t len, pad, len_on_first_bd; 3923 uint8_t *data; 3924 #if __FreeBSD_version >= 1100000 3925 uint8_t hash_type; 3926 #endif /* #if __FreeBSD_version >= 1100000 */ 3927 3928 /* Get the CQE from the completion ring */ 3929 cqe = (union eth_rx_cqe *) 3930 ecore_chain_consume(&rxq->rx_comp_ring); 3931 cqe_type = cqe->fast_path_regular.type; 3932 3933 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 3934 QL_DPRINT3(ha, (ha->pci_dev, "Got a slowath CQE\n")); 3935 3936 ecore_eth_cqe_completion(p_hwfn, 3937 (struct eth_slow_path_rx_cqe *)cqe); 3938 goto next_cqe; 3939 } 3940 3941 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 3942 3943 switch (cqe_type) { 3944 3945 case ETH_RX_CQE_TYPE_TPA_START: 3946 qlnx_tpa_start(ha, fp, rxq, 3947 &cqe->fast_path_tpa_start); 3948 fp->tpa_start++; 3949 break; 3950 3951 case ETH_RX_CQE_TYPE_TPA_CONT: 3952 qlnx_tpa_cont(ha, fp, rxq, 3953 &cqe->fast_path_tpa_cont); 3954 fp->tpa_cont++; 3955 break; 3956 3957 case ETH_RX_CQE_TYPE_TPA_END: 3958 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 3959 &cqe->fast_path_tpa_end); 3960 fp->tpa_end++; 3961 break; 3962 3963 default: 3964 break; 3965 } 3966 3967 goto next_cqe; 3968 } 3969 3970 /* Get the data from the SW ring */ 3971 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3972 mp = sw_rx_data->data; 3973 3974 if (mp == NULL) { 3975 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n", 3976 __func__)); 3977 fp->err_rx_mp_null++; 3978 rxq->sw_rx_cons = 3979 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3980 goto next_cqe; 3981 } 3982 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3983 BUS_DMASYNC_POSTREAD); 3984 3985 /* non GRO */ 3986 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 3987 len = le16toh(fp_cqe->pkt_len); 3988 pad = fp_cqe->placement_offset; 3989 3990 QL_DPRINT3(ha, 3991 (ha->pci_dev, "CQE type = %x, flags = %x, vlan = %x," 3992 " len %u, parsing flags = %d pad = %d\n", 3993 cqe_type, fp_cqe->bitfields, 3994 le16toh(fp_cqe->vlan_tag), 3995 len, le16toh(fp_cqe->pars_flags.flags), pad)); 3996 3997 data = mtod(mp, uint8_t *); 3998 data = data + pad; 3999 4000 if (0) 4001 qlnx_dump_buf8(ha, __func__, data, len); 4002 4003 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4004 * is always with a fixed size. If allocation fails, we take the 4005 * consumed BD and return it to the ring in the PROD position. 4006 * The packet that was received on that BD will be dropped (and 4007 * not passed to the upper stack). 
4008 */ 4009 /* If this is an error packet then drop it */ 4010 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4011 CQE_FLAGS_ERR) { 4012 4013 QL_DPRINT1(ha, (ha->pci_dev, 4014 "CQE in CONS = %u has error, flags = %x," 4015 " dropping incoming packet\n", sw_comp_cons, 4016 le16toh(cqe->fast_path_regular.pars_flags.flags))); 4017 4018 fp->err_rx_hw_errors++; 4019 4020 qlnx_reuse_rx_data(rxq); 4021 4022 QLNX_INC_IERRORS(ifp); 4023 4024 goto next_cqe; 4025 } 4026 4027 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4028 4029 QL_DPRINT1(ha, (ha->pci_dev, 4030 "New buffer allocation failed, dropping" 4031 " incoming packet and reusing its buffer\n")); 4032 4033 qlnx_reuse_rx_data(rxq); 4034 4035 fp->err_rx_alloc_errors++; 4036 4037 QLNX_INC_IQDROPS(ifp); 4038 4039 goto next_cqe; 4040 } 4041 4042 ecore_chain_consume(&rxq->rx_bd_ring); 4043 4044 len_on_first_bd = fp_cqe->len_on_first_bd; 4045 m_adj(mp, pad); 4046 mp->m_pkthdr.len = len; 4047 4048 QL_DPRINT1(ha, 4049 (ha->pci_dev, "%s: len = %d len_on_first_bd = %d\n", 4050 __func__, len, len_on_first_bd)); 4051 4052 if ((len > 60 ) && (len > len_on_first_bd)) { 4053 4054 mp->m_len = len_on_first_bd; 4055 4056 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4057 (len - len_on_first_bd)) != 0) { 4058 4059 m_freem(mp); 4060 4061 QLNX_INC_IQDROPS(ifp); 4062 4063 goto next_cqe; 4064 } 4065 4066 } else if (len_on_first_bd < len) { 4067 fp->err_rx_jumbo_chain_pkts++; 4068 } else { 4069 mp->m_len = len; 4070 } 4071 4072 mp->m_flags |= M_PKTHDR; 4073 4074 /* assign packet to this interface interface */ 4075 mp->m_pkthdr.rcvif = ifp; 4076 4077 /* assume no hardware checksum has complated */ 4078 mp->m_pkthdr.csum_flags = 0; 4079 4080 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4081 4082 #if __FreeBSD_version >= 1100000 4083 4084 hash_type = fp_cqe->bitfields & 4085 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4086 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4087 4088 switch (hash_type) { 4089 4090 case RSS_HASH_TYPE_IPV4: 4091 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4092 break; 4093 4094 case RSS_HASH_TYPE_TCP_IPV4: 4095 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4096 break; 4097 4098 case RSS_HASH_TYPE_IPV6: 4099 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4100 break; 4101 4102 case RSS_HASH_TYPE_TCP_IPV6: 4103 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4104 break; 4105 4106 default: 4107 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4108 break; 4109 } 4110 4111 #else 4112 mp->m_flags |= M_FLOWID; 4113 #endif 4114 4115 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4116 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4117 } 4118 4119 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4120 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4121 } 4122 4123 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4124 mp->m_pkthdr.csum_data = 0xFFFF; 4125 mp->m_pkthdr.csum_flags |= 4126 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4127 } 4128 4129 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4130 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4131 mp->m_flags |= M_VLANTAG; 4132 } 4133 4134 QLNX_INC_IPACKETS(ifp); 4135 QLNX_INC_IBYTES(ifp, len); 4136 4137 #ifdef QLNX_SOFT_LRO 4138 4139 if (lro_enable) { 4140 4141 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4142 4143 tcp_lro_queue_mbuf(lro, mp); 4144 4145 #else 4146 4147 if (tcp_lro_rx(lro, mp, 0)) 4148 (*ifp->if_input)(ifp, mp); 4149 4150 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4151 4152 } else { 4153 (*ifp->if_input)(ifp, mp); 4154 } 4155 #else 4156 4157 (*ifp->if_input)(ifp, mp); 4158 
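	/*
	 * Receive dispatch (descriptive comment): when QLNX_SOFT_LRO is
	 * compiled in and IFCAP_LRO is enabled, frames are queued to the
	 * per-rxq software LRO engine (tcp_lro_queue_mbuf() on newer kernels,
	 * tcp_lro_rx() otherwise) and any aggregations are flushed from
	 * qlnx_fp_isr(); otherwise frames are passed straight up the stack
	 * through if_input.
	 */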
4159 #endif /* #ifdef QLNX_SOFT_LRO */ 4160 4161 rx_pkt++; 4162 4163 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4164 4165 next_cqe: /* don't consume bd rx buffer */ 4166 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4167 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4168 4169 /* CR TPA - revisit how to handle budget in TPA perhaps 4170 increase on "end" */ 4171 if (rx_pkt == budget) 4172 break; 4173 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4174 4175 /* Update producers */ 4176 qlnx_update_rx_prod(p_hwfn, rxq); 4177 4178 return rx_pkt; 4179 } 4180 4181 /* 4182 * fast path interrupt 4183 */ 4184 4185 static void 4186 qlnx_fp_isr(void *arg) 4187 { 4188 qlnx_ivec_t *ivec = arg; 4189 qlnx_host_t *ha; 4190 struct qlnx_fastpath *fp = NULL; 4191 int idx, lro_enable, tc; 4192 int rx_int = 0, total_rx_count = 0; 4193 4194 ha = ivec->ha; 4195 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 4196 4197 if (ha->state != QLNX_STATE_OPEN) { 4198 return; 4199 } 4200 4201 idx = ivec->rss_idx; 4202 4203 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4204 QL_DPRINT1(ha, (ha->pci_dev, "%s: illegal interrupt[%d]\n", 4205 __func__, idx)); 4206 ha->err_illegal_intr++; 4207 return; 4208 } 4209 fp = &ha->fp_array[idx]; 4210 4211 if (fp == NULL) { 4212 QL_DPRINT1(ha, (ha->pci_dev, "%s: fp_array[%d] NULL\n", 4213 __func__, idx)); 4214 ha->err_fp_null++; 4215 } else { 4216 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4217 4218 do { 4219 for (tc = 0; tc < ha->num_tc; tc++) { 4220 if (mtx_trylock(&fp->tx_mtx)) { 4221 qlnx_tx_int(ha, fp, fp->txq[tc]); 4222 mtx_unlock(&fp->tx_mtx); 4223 } 4224 } 4225 4226 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4227 lro_enable); 4228 4229 if (rx_int) { 4230 fp->rx_pkts += rx_int; 4231 total_rx_count += rx_int; 4232 } 4233 4234 } while (rx_int); 4235 4236 4237 #ifdef QLNX_SOFT_LRO 4238 { 4239 struct lro_ctrl *lro; 4240 4241 lro = &fp->rxq->lro; 4242 4243 if (lro_enable && total_rx_count) { 4244 4245 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4246 4247 #ifdef QLNX_TRACE_LRO_CNT 4248 if (lro->lro_mbuf_count & ~1023) 4249 fp->lro_cnt_1024++; 4250 else if (lro->lro_mbuf_count & ~511) 4251 fp->lro_cnt_512++; 4252 else if (lro->lro_mbuf_count & ~255) 4253 fp->lro_cnt_256++; 4254 else if (lro->lro_mbuf_count & ~127) 4255 fp->lro_cnt_128++; 4256 else if (lro->lro_mbuf_count & ~63) 4257 fp->lro_cnt_64++; 4258 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4259 4260 tcp_lro_flush_all(lro); 4261 4262 #else 4263 struct lro_entry *queued; 4264 4265 while ((!SLIST_EMPTY(&lro->lro_active))) { 4266 queued = SLIST_FIRST(&lro->lro_active); 4267 SLIST_REMOVE_HEAD(&lro->lro_active, \ 4268 next); 4269 tcp_lro_flush(lro, queued); 4270 } 4271 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4272 } 4273 } 4274 #endif /* #ifdef QLNX_SOFT_LRO */ 4275 4276 if (fp->fp_taskqueue != NULL) 4277 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 4278 4279 ecore_sb_update_sb_idx(fp->sb_info); 4280 rmb(); 4281 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4282 4283 return; 4284 } 4285 4286 return; 4287 } 4288 4289 4290 /* 4291 * slow path interrupt processing function 4292 * can be invoked in polled mode or in interrupt mode via taskqueue. 
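 * It counts the event in ha->sp_interrupts and dispatches ecore_int_sp_dpc()
 * for the hardware function that raised it.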
4293 */ 4294 void 4295 qlnx_sp_isr(void *arg) 4296 { 4297 struct ecore_hwfn *p_hwfn; 4298 qlnx_host_t *ha; 4299 4300 p_hwfn = arg; 4301 4302 ha = (qlnx_host_t *)p_hwfn->p_dev; 4303 4304 ha->sp_interrupts++; 4305 4306 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 4307 4308 ecore_int_sp_dpc(p_hwfn); 4309 4310 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 4311 4312 return; 4313 } 4314 4315 /***************************************************************************** 4316 * Support Functions for DMA'able Memory 4317 *****************************************************************************/ 4318 4319 static void 4320 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4321 { 4322 *((bus_addr_t *)arg) = 0; 4323 4324 if (error) { 4325 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4326 return; 4327 } 4328 4329 *((bus_addr_t *)arg) = segs[0].ds_addr; 4330 4331 return; 4332 } 4333 4334 static int 4335 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4336 { 4337 int ret = 0; 4338 device_t dev; 4339 bus_addr_t b_addr; 4340 4341 dev = ha->pci_dev; 4342 4343 ret = bus_dma_tag_create( 4344 ha->parent_tag,/* parent */ 4345 dma_buf->alignment, 4346 ((bus_size_t)(1ULL << 32)),/* boundary */ 4347 BUS_SPACE_MAXADDR, /* lowaddr */ 4348 BUS_SPACE_MAXADDR, /* highaddr */ 4349 NULL, NULL, /* filter, filterarg */ 4350 dma_buf->size, /* maxsize */ 4351 1, /* nsegments */ 4352 dma_buf->size, /* maxsegsize */ 4353 0, /* flags */ 4354 NULL, NULL, /* lockfunc, lockarg */ 4355 &dma_buf->dma_tag); 4356 4357 if (ret) { 4358 QL_DPRINT1(ha, 4359 (dev, "%s: could not create dma tag\n", __func__)); 4360 goto qlnx_alloc_dmabuf_exit; 4361 } 4362 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4363 (void **)&dma_buf->dma_b, 4364 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4365 &dma_buf->dma_map); 4366 if (ret) { 4367 bus_dma_tag_destroy(dma_buf->dma_tag); 4368 QL_DPRINT1(ha, 4369 (dev, "%s: bus_dmamem_alloc failed\n", __func__)); 4370 goto qlnx_alloc_dmabuf_exit; 4371 } 4372 4373 ret = bus_dmamap_load(dma_buf->dma_tag, 4374 dma_buf->dma_map, 4375 dma_buf->dma_b, 4376 dma_buf->size, 4377 qlnx_dmamap_callback, 4378 &b_addr, BUS_DMA_NOWAIT); 4379 4380 if (ret || !b_addr) { 4381 bus_dma_tag_destroy(dma_buf->dma_tag); 4382 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4383 dma_buf->dma_map); 4384 ret = -1; 4385 goto qlnx_alloc_dmabuf_exit; 4386 } 4387 4388 dma_buf->dma_addr = b_addr; 4389 4390 qlnx_alloc_dmabuf_exit: 4391 4392 return ret; 4393 } 4394 4395 static void 4396 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4397 { 4398 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 4399 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 4400 bus_dma_tag_destroy(dma_buf->dma_tag); 4401 return; 4402 } 4403 4404 void * 4405 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 4406 { 4407 qlnx_dma_t dma_buf; 4408 qlnx_dma_t *dma_p; 4409 qlnx_host_t *ha; 4410 device_t dev; 4411 4412 ha = (qlnx_host_t *)ecore_dev; 4413 dev = ha->pci_dev; 4414 4415 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4416 4417 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 4418 4419 dma_buf.size = size + PAGE_SIZE; 4420 dma_buf.alignment = 8; 4421 4422 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 4423 return (NULL); 4424 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 4425 4426 *phys = dma_buf.dma_addr; 4427 4428 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 4429 4430 memcpy(dma_p, &dma_buf, 
sizeof(qlnx_dma_t)); 4431 4432 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__, 4433 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 4434 dma_buf.dma_b, (void *)dma_buf.dma_addr, size)); 4435 4436 return (dma_buf.dma_b); 4437 } 4438 4439 void 4440 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 4441 uint32_t size) 4442 { 4443 qlnx_dma_t dma_buf, *dma_p; 4444 qlnx_host_t *ha; 4445 device_t dev; 4446 4447 ha = (qlnx_host_t *)ecore_dev; 4448 dev = ha->pci_dev; 4449 4450 if (v_addr == NULL) 4451 return; 4452 4453 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4454 4455 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 4456 4457 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__, 4458 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 4459 dma_p->dma_b, (void *)dma_p->dma_addr, size)); 4460 4461 dma_buf = *dma_p; 4462 4463 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 4464 return; 4465 } 4466 4467 static int 4468 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 4469 { 4470 int ret; 4471 device_t dev; 4472 4473 dev = ha->pci_dev; 4474 4475 /* 4476 * Allocate parent DMA Tag 4477 */ 4478 ret = bus_dma_tag_create( 4479 bus_get_dma_tag(dev), /* parent */ 4480 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 4481 BUS_SPACE_MAXADDR, /* lowaddr */ 4482 BUS_SPACE_MAXADDR, /* highaddr */ 4483 NULL, NULL, /* filter, filterarg */ 4484 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 4485 0, /* nsegments */ 4486 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 4487 0, /* flags */ 4488 NULL, NULL, /* lockfunc, lockarg */ 4489 &ha->parent_tag); 4490 4491 if (ret) { 4492 QL_DPRINT1(ha, (dev, "%s: could not create parent dma tag\n", 4493 __func__)); 4494 return (-1); 4495 } 4496 4497 ha->flags.parent_tag = 1; 4498 4499 return (0); 4500 } 4501 4502 static void 4503 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 4504 { 4505 if (ha->parent_tag != NULL) { 4506 bus_dma_tag_destroy(ha->parent_tag); 4507 ha->parent_tag = NULL; 4508 } 4509 return; 4510 } 4511 4512 static int 4513 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 4514 { 4515 if (bus_dma_tag_create(NULL, /* parent */ 4516 1, 0, /* alignment, bounds */ 4517 BUS_SPACE_MAXADDR, /* lowaddr */ 4518 BUS_SPACE_MAXADDR, /* highaddr */ 4519 NULL, NULL, /* filter, filterarg */ 4520 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 4521 QLNX_MAX_SEGMENTS, /* nsegments */ 4522 (PAGE_SIZE * 4), /* maxsegsize */ 4523 BUS_DMA_ALLOCNOW, /* flags */ 4524 NULL, /* lockfunc */ 4525 NULL, /* lockfuncarg */ 4526 &ha->tx_tag)) { 4527 4528 QL_DPRINT1(ha, (ha->pci_dev, "%s: tx_tag alloc failed\n", 4529 __func__)); 4530 return (-1); 4531 } 4532 4533 return (0); 4534 } 4535 4536 static void 4537 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 4538 { 4539 if (ha->tx_tag != NULL) { 4540 bus_dma_tag_destroy(ha->tx_tag); 4541 ha->tx_tag = NULL; 4542 } 4543 return; 4544 } 4545 4546 static int 4547 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 4548 { 4549 if (bus_dma_tag_create(NULL, /* parent */ 4550 1, 0, /* alignment, bounds */ 4551 BUS_SPACE_MAXADDR, /* lowaddr */ 4552 BUS_SPACE_MAXADDR, /* highaddr */ 4553 NULL, NULL, /* filter, filterarg */ 4554 MJUM9BYTES, /* maxsize */ 4555 1, /* nsegments */ 4556 MJUM9BYTES, /* maxsegsize */ 4557 BUS_DMA_ALLOCNOW, /* flags */ 4558 NULL, /* lockfunc */ 4559 NULL, /* lockfuncarg */ 4560 &ha->rx_tag)) { 4561 4562 QL_DPRINT1(ha, (ha->pci_dev, "%s: rx_tag alloc failed\n", 4563 __func__)); 4564 4565 return (-1); 4566 } 4567 return (0); 4568 } 4569 4570 static void 4571 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 4572 { 4573 if (ha->rx_tag != NULL) { 4574 
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}
	return;
}

/*********************************
 * Exported functions
 *********************************/
uint32_t
qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
{
	uint32_t bar_size;

	bar_id = bar_id * 2;

	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
			SYS_RES_MEMORY,
			PCIR_BAR(bar_id));

	return (bar_size);
}

uint32_t
qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
{
	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
			pci_reg, 1);
	return 0;
}

uint32_t
qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
	uint16_t *reg_value)
{
	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
			pci_reg, 2);
	return 0;
}

uint32_t
qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
	uint32_t *reg_value)
{
	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
			pci_reg, 4);
	return 0;
}

void
qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
{
	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, reg_value, 1);
	return;
}

void
qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
	uint16_t reg_value)
{
	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, reg_value, 2);
	return;
}

void
qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
	uint32_t reg_value)
{
	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, reg_value, 4);
	return;
}


int
qlnx_pci_find_capability(void *ecore_dev, int cap)
{
	int reg;

	if (pci_find_cap(((qlnx_host_t *)ecore_dev)->pci_dev, PCIY_EXPRESS,
			&reg) == 0)
		return reg;
	else {
		QL_DPRINT1(((qlnx_host_t *)ecore_dev),
			(((qlnx_host_t *)ecore_dev)->pci_dev,
			"%s: failed\n", __func__));
		return 0;
	}
}

uint32_t
qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
{
	uint32_t data32;
	struct ecore_dev *cdev;
	struct ecore_hwfn *p_hwfn;

	p_hwfn = hwfn;

	cdev = p_hwfn->p_dev;

	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
			(uint8_t *)(cdev->regview)) + reg_addr;

	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);

	return (data32);
}

void
qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
{
	struct ecore_dev *cdev;
	struct ecore_hwfn *p_hwfn;

	p_hwfn = hwfn;

	cdev = p_hwfn->p_dev;

	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
			(uint8_t *)(cdev->regview)) + reg_addr;

	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);

	return;
}

void
qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
{
	struct ecore_dev *cdev;
	struct ecore_hwfn *p_hwfn;

	p_hwfn = hwfn;

	cdev = p_hwfn->p_dev;

	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
			(uint8_t *)(cdev->regview)) + reg_addr;

	bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);

	return;
}

void
qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
{
struct ecore_dev *cdev; 4725 struct ecore_hwfn *p_hwfn; 4726 4727 p_hwfn = hwfn; 4728 4729 cdev = p_hwfn->p_dev; 4730 4731 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) - 4732 (uint8_t *)(cdev->doorbells)) + reg_addr; 4733 4734 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value); 4735 4736 return; 4737 } 4738 4739 uint32_t 4740 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 4741 { 4742 uint32_t data32; 4743 uint32_t offset; 4744 struct ecore_dev *cdev; 4745 4746 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4747 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4748 4749 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 4750 4751 return (data32); 4752 } 4753 4754 void 4755 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 4756 { 4757 uint32_t offset; 4758 struct ecore_dev *cdev; 4759 4760 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4761 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4762 4763 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 4764 4765 return; 4766 } 4767 4768 void * 4769 qlnx_zalloc(uint32_t size) 4770 { 4771 caddr_t va; 4772 4773 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 4774 bzero(va, size); 4775 return ((void *)va); 4776 } 4777 4778 void 4779 qlnx_barrier(void *p_hwfn) 4780 { 4781 qlnx_host_t *ha; 4782 4783 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4784 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 4785 } 4786 4787 void 4788 qlnx_link_update(void *p_hwfn) 4789 { 4790 qlnx_host_t *ha; 4791 int prev_link_state; 4792 4793 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4794 4795 qlnx_fill_link(p_hwfn, &ha->if_link); 4796 4797 prev_link_state = ha->link_up; 4798 ha->link_up = ha->if_link.link_up; 4799 4800 if (prev_link_state != ha->link_up) { 4801 if (ha->link_up) { 4802 if_link_state_change(ha->ifp, LINK_STATE_UP); 4803 } else { 4804 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 4805 } 4806 } 4807 return; 4808 } 4809 4810 void 4811 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link) 4812 { 4813 struct ecore_mcp_link_params link_params; 4814 struct ecore_mcp_link_state link_state; 4815 4816 memset(if_link, 0, sizeof(*if_link)); 4817 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 4818 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 4819 4820 /* Prepare source inputs */ 4821 /* we only deal with physical functions */ 4822 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 4823 sizeof(link_params)); 4824 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 4825 sizeof(link_state)); 4826 4827 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type); 4828 4829 /* Set the link parameters to pass to protocol driver */ 4830 if (link_state.link_up) { 4831 if_link->link_up = true; 4832 if_link->speed = link_state.speed; 4833 } 4834 4835 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 4836 4837 if (link_params.speed.autoneg) 4838 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 4839 4840 if (link_params.pause.autoneg || 4841 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 4842 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 4843 4844 if (link_params.pause.autoneg || link_params.pause.forced_rx || 4845 link_params.pause.forced_tx) 4846 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 4847 4848 if (link_params.speed.advertised_speeds & 4849 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 4850 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 4851 
				QLNX_LINK_CAP_1000baseT_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if_link->advertised_caps = if_link->supported_caps;

	if_link->autoneg = link_params.speed.autoneg;
	if_link->duplex = QLNX_LINK_DUPLEX;

	/* Link partner capabilities */

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if (link_state.an_complete)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;

	if (link_state.partner_adv_pause)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;

	if ((link_state.partner_adv_pause ==
		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
		(link_state.partner_adv_pause ==
			ECORE_LINK_PARTNER_BOTH_PAUSE))
		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;

	return;
}

static int
qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
{
	int rc, i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
		p_hwfn->pf_params = *func_params;
	}

	rc = ecore_resc_alloc(cdev);
	if (rc)
		goto qlnx_nic_setup_exit;

	ecore_resc_setup(cdev);

qlnx_nic_setup_exit:

	return rc;
}

static int
qlnx_nic_start(struct ecore_dev *cdev)
{
	int rc;
	struct ecore_hw_init_params params;

	bzero(&params, sizeof (struct ecore_hw_init_params));

	params.p_tunn = NULL;
	params.b_hw_start = true;
	params.int_mode = cdev->int_mode;
	params.allow_npar_tx_switch = true;
	params.bin_fw_data = NULL;

	rc = ecore_hw_init(cdev, &params);
	if (rc) {
		ecore_resc_free(cdev);
		return rc;
	}

	return 0;
}

static int
qlnx_slowpath_start(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	struct ecore_pf_params pf_params;
	int rc;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons =
		(ha->num_rss) * (ha->num_tc + 1);

	cdev = &ha->cdev;

	rc = qlnx_nic_setup(cdev, &pf_params);
	if (rc)
		goto qlnx_slowpath_start_exit;

	cdev->int_mode = ECORE_INT_MODE_MSIX;
	cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;

#ifdef QLNX_MAX_COALESCE
	cdev->rx_coalesce_usecs = 255;
	cdev->tx_coalesce_usecs = 255;
#endif

	rc = qlnx_nic_start(cdev);

	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;

qlnx_slowpath_start_exit:

	return (rc);
}

static int
qlnx_slowpath_stop(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	device_t dev = ha->pci_dev;
	int i;

	cdev = &ha->cdev;

	ecore_hw_stop(cdev);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		ha->sp_handle[i] = NULL;

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
		ha->sp_irq[i] = NULL;
	}

	ecore_resc_free(cdev);

	return 0;
}

static void
qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
	char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);

	for_each_hwfn(cdev, i) {
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
	}

	cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;

	return ;
}

void
qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
{
	enum ecore_mcp_protocol_type type;
	union ecore_mcp_protocol_stats *stats;
	struct ecore_eth_stats eth_stats;
	device_t dev;

	dev = ((qlnx_host_t *)cdev)->pci_dev;
	stats = proto_stats;
	type = proto_type;

	switch (type) {
	case ECORE_MCP_LAN_STATS:
		ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;

	default:
		((qlnx_host_t *)cdev)->err_get_proto_invalid_type++;

		QL_DPRINT1(((qlnx_host_t *)cdev),
			(dev, "%s: invalid protocol type 0x%x\n", __func__,
			type));
		break;
	}
	return;
}

static int
qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
{
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (p_ptt == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_ptt_acquire failed\n", __func__));
		return (-1);
	}
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (0);
}

static int
qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
{
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (p_ptt == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_ptt_acquire failed\n", __func__));
		return (-1);
	}
	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);

ecore_ptt_release(p_hwfn, p_ptt); 5111 5112 return (0); 5113 } 5114 5115 static int 5116 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5117 { 5118 struct ecore_dev *cdev; 5119 5120 cdev = &ha->cdev; 5121 5122 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5123 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5124 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5125 5126 return 0; 5127 } 5128 5129 static void 5130 qlnx_init_fp(qlnx_host_t *ha) 5131 { 5132 int rss_id, txq_array_index, tc; 5133 5134 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5135 5136 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5137 5138 fp->rss_id = rss_id; 5139 fp->edev = ha; 5140 fp->sb_info = &ha->sb_array[rss_id]; 5141 fp->rxq = &ha->rxq_array[rss_id]; 5142 fp->rxq->rxq_id = rss_id; 5143 5144 for (tc = 0; tc < ha->num_tc; tc++) { 5145 txq_array_index = tc * ha->num_rss + rss_id; 5146 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5147 fp->txq[tc]->index = txq_array_index; 5148 } 5149 5150 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5151 rss_id); 5152 5153 /* reset all the statistics counters */ 5154 5155 fp->tx_pkts_processed = 0; 5156 fp->tx_pkts_freed = 0; 5157 fp->tx_pkts_transmitted = 0; 5158 fp->tx_pkts_completed = 0; 5159 fp->tx_lso_wnd_min_len = 0; 5160 fp->tx_defrag = 0; 5161 fp->tx_nsegs_gt_elem_left = 0; 5162 fp->tx_tso_max_nsegs = 0; 5163 fp->tx_tso_min_nsegs = 0; 5164 fp->err_tx_nsegs_gt_elem_left = 0; 5165 fp->err_tx_dmamap_create = 0; 5166 fp->err_tx_defrag_dmamap_load = 0; 5167 fp->err_tx_non_tso_max_seg = 0; 5168 fp->err_tx_dmamap_load = 0; 5169 fp->err_tx_defrag = 0; 5170 fp->err_tx_free_pkt_null = 0; 5171 fp->err_tx_cons_idx_conflict = 0; 5172 5173 fp->rx_pkts = 0; 5174 fp->err_m_getcl = 0; 5175 fp->err_m_getjcl = 0; 5176 } 5177 return; 5178 } 5179 5180 static void 5181 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5182 { 5183 struct ecore_dev *cdev; 5184 5185 cdev = &ha->cdev; 5186 5187 if (sb_info->sb_virt) { 5188 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5189 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5190 sb_info->sb_virt = NULL; 5191 } 5192 } 5193 5194 static int 5195 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5196 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5197 { 5198 struct ecore_hwfn *p_hwfn; 5199 int hwfn_index, rc; 5200 u16 rel_sb_id; 5201 5202 hwfn_index = sb_id % cdev->num_hwfns; 5203 p_hwfn = &cdev->hwfns[hwfn_index]; 5204 rel_sb_id = sb_id / cdev->num_hwfns; 5205 5206 QL_DPRINT2(((qlnx_host_t *)cdev), (((qlnx_host_t *)cdev)->pci_dev, 5207 "%s: hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x " 5208 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5209 __func__, hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5210 sb_virt_addr, (void *)sb_phy_addr)); 5211 5212 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5213 sb_virt_addr, sb_phy_addr, rel_sb_id); 5214 5215 return rc; 5216 } 5217 5218 /* This function allocates fast-path status block memory */ 5219 static int 5220 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5221 { 5222 struct status_block *sb_virt; 5223 bus_addr_t sb_phys; 5224 int rc; 5225 uint32_t size; 5226 struct ecore_dev *cdev; 5227 5228 cdev = &ha->cdev; 5229 5230 size = sizeof(*sb_virt); 5231 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5232 5233 if (!sb_virt) { 5234 QL_DPRINT1(ha, (ha->pci_dev, 5235 "%s: Status block allocation 
failed\n", __func__)); 5236 return -ENOMEM; 5237 } 5238 5239 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5240 if (rc) { 5241 QL_DPRINT1(ha, (ha->pci_dev, "%s: failed\n", __func__)); 5242 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5243 } 5244 5245 return rc; 5246 } 5247 5248 static void 5249 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5250 { 5251 int i; 5252 struct sw_rx_data *rx_buf; 5253 5254 for (i = 0; i < rxq->num_rx_buffers; i++) { 5255 5256 rx_buf = &rxq->sw_rx_ring[i]; 5257 5258 if (rx_buf->data != NULL) { 5259 if (rx_buf->map != NULL) { 5260 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5261 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5262 rx_buf->map = NULL; 5263 } 5264 m_freem(rx_buf->data); 5265 rx_buf->data = NULL; 5266 } 5267 } 5268 return; 5269 } 5270 5271 static void 5272 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5273 { 5274 struct ecore_dev *cdev; 5275 int i; 5276 5277 cdev = &ha->cdev; 5278 5279 qlnx_free_rx_buffers(ha, rxq); 5280 5281 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5282 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5283 if (rxq->tpa_info[i].mpf != NULL) 5284 m_freem(rxq->tpa_info[i].mpf); 5285 } 5286 5287 bzero((void *)&rxq->sw_rx_ring[0], 5288 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5289 5290 /* Free the real RQ ring used by FW */ 5291 if (rxq->rx_bd_ring.p_virt_addr) { 5292 ecore_chain_free(cdev, &rxq->rx_bd_ring); 5293 rxq->rx_bd_ring.p_virt_addr = NULL; 5294 } 5295 5296 /* Free the real completion ring used by FW */ 5297 if (rxq->rx_comp_ring.p_virt_addr && 5298 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 5299 ecore_chain_free(cdev, &rxq->rx_comp_ring); 5300 rxq->rx_comp_ring.p_virt_addr = NULL; 5301 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 5302 } 5303 5304 #ifdef QLNX_SOFT_LRO 5305 { 5306 struct lro_ctrl *lro; 5307 5308 lro = &rxq->lro; 5309 tcp_lro_free(lro); 5310 } 5311 #endif /* #ifdef QLNX_SOFT_LRO */ 5312 5313 return; 5314 } 5315 5316 static int 5317 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5318 { 5319 register struct mbuf *mp; 5320 uint16_t rx_buf_size; 5321 struct sw_rx_data *sw_rx_data; 5322 struct eth_rx_bd *rx_bd; 5323 dma_addr_t dma_addr; 5324 bus_dmamap_t map; 5325 bus_dma_segment_t segs[1]; 5326 int nsegs; 5327 int ret; 5328 struct ecore_dev *cdev; 5329 5330 cdev = &ha->cdev; 5331 5332 rx_buf_size = rxq->rx_buf_size; 5333 5334 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5335 5336 if (mp == NULL) { 5337 QL_DPRINT1(ha, (ha->pci_dev, 5338 "%s : Failed to allocate Rx data\n", __func__)); 5339 return -ENOMEM; 5340 } 5341 5342 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5343 5344 map = (bus_dmamap_t)0; 5345 5346 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5347 BUS_DMA_NOWAIT); 5348 dma_addr = segs[0].ds_addr; 5349 5350 if (ret || !dma_addr || (nsegs != 1)) { 5351 m_freem(mp); 5352 QL_DPRINT1(ha, (ha->pci_dev, 5353 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5354 __func__, ret, (long long unsigned int)dma_addr, 5355 nsegs)); 5356 return -ENOMEM; 5357 } 5358 5359 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5360 sw_rx_data->data = mp; 5361 sw_rx_data->dma_addr = dma_addr; 5362 sw_rx_data->map = map; 5363 5364 /* Advance PROD and get BD pointer */ 5365 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 5366 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 5367 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 5368 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5369 5370 rxq->sw_rx_prod = 
(rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5371 5372 return 0; 5373 } 5374 5375 static int 5376 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 5377 struct qlnx_agg_info *tpa) 5378 { 5379 struct mbuf *mp; 5380 dma_addr_t dma_addr; 5381 bus_dmamap_t map; 5382 bus_dma_segment_t segs[1]; 5383 int nsegs; 5384 int ret; 5385 struct sw_rx_data *rx_buf; 5386 5387 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5388 5389 if (mp == NULL) { 5390 QL_DPRINT1(ha, (ha->pci_dev, 5391 "%s : Failed to allocate Rx data\n", __func__)); 5392 return -ENOMEM; 5393 } 5394 5395 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5396 5397 map = (bus_dmamap_t)0; 5398 5399 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5400 BUS_DMA_NOWAIT); 5401 dma_addr = segs[0].ds_addr; 5402 5403 if (ret || !dma_addr || (nsegs != 1)) { 5404 m_freem(mp); 5405 QL_DPRINT1(ha, (ha->pci_dev, 5406 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5407 __func__, ret, (long long unsigned int)dma_addr, 5408 nsegs)); 5409 return -ENOMEM; 5410 } 5411 5412 rx_buf = &tpa->rx_buf; 5413 5414 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 5415 5416 rx_buf->data = mp; 5417 rx_buf->dma_addr = dma_addr; 5418 rx_buf->map = map; 5419 5420 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5421 5422 return (0); 5423 } 5424 5425 static void 5426 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 5427 { 5428 struct sw_rx_data *rx_buf; 5429 5430 rx_buf = &tpa->rx_buf; 5431 5432 if (rx_buf->data != NULL) { 5433 if (rx_buf->map != NULL) { 5434 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5435 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5436 rx_buf->map = NULL; 5437 } 5438 m_freem(rx_buf->data); 5439 rx_buf->data = NULL; 5440 } 5441 return; 5442 } 5443 5444 /* This function allocates all memory needed per Rx queue */ 5445 static int 5446 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5447 { 5448 int i, rc, num_allocated; 5449 struct ifnet *ifp; 5450 struct ecore_dev *cdev; 5451 5452 cdev = &ha->cdev; 5453 ifp = ha->ifp; 5454 5455 rxq->num_rx_buffers = RX_RING_SIZE; 5456 5457 rxq->rx_buf_size = ha->rx_buf_size; 5458 5459 /* Allocate the parallel driver ring for Rx buffers */ 5460 bzero((void *)&rxq->sw_rx_ring[0], 5461 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5462 5463 /* Allocate FW Rx ring */ 5464 5465 rc = ecore_chain_alloc(cdev, 5466 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5467 ECORE_CHAIN_MODE_NEXT_PTR, 5468 ECORE_CHAIN_CNT_TYPE_U16, 5469 RX_RING_SIZE, 5470 sizeof(struct eth_rx_bd), 5471 &rxq->rx_bd_ring, NULL); 5472 5473 if (rc) 5474 goto err; 5475 5476 /* Allocate FW completion ring */ 5477 rc = ecore_chain_alloc(cdev, 5478 ECORE_CHAIN_USE_TO_CONSUME, 5479 ECORE_CHAIN_MODE_PBL, 5480 ECORE_CHAIN_CNT_TYPE_U16, 5481 RX_RING_SIZE, 5482 sizeof(union eth_rx_cqe), 5483 &rxq->rx_comp_ring, NULL); 5484 5485 if (rc) 5486 goto err; 5487 5488 /* Allocate buffers for the Rx ring */ 5489 5490 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5491 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 5492 &rxq->tpa_info[i]); 5493 if (rc) 5494 break; 5495 5496 } 5497 5498 for (i = 0; i < rxq->num_rx_buffers; i++) { 5499 rc = qlnx_alloc_rx_buffer(ha, rxq); 5500 if (rc) 5501 break; 5502 } 5503 num_allocated = i; 5504 if (!num_allocated) { 5505 QL_DPRINT1(ha, (ha->pci_dev, 5506 "%s: Rx buffers allocation failed\n", __func__)); 5507 goto err; 5508 } else if (num_allocated < rxq->num_rx_buffers) { 5509 QL_DPRINT1(ha, (ha->pci_dev, 5510 "%s: Allocated less buffers than" 5511 " desired (%d allocated)\n", __func__, 
num_allocated)); 5512 } 5513 5514 #ifdef QLNX_SOFT_LRO 5515 5516 { 5517 struct lro_ctrl *lro; 5518 5519 lro = &rxq->lro; 5520 5521 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5522 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 5523 QL_DPRINT1(ha, (ha->pci_dev, 5524 "%s: tcp_lro_init[%d] failed\n", 5525 __func__, rxq->rxq_id)); 5526 goto err; 5527 } 5528 #else 5529 if (tcp_lro_init(lro)) { 5530 QL_DPRINT1(ha, (ha->pci_dev, 5531 "%s: tcp_lro_init[%d] failed\n", 5532 __func__, rxq->rxq_id)); 5533 goto err; 5534 } 5535 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5536 5537 lro->ifp = ha->ifp; 5538 } 5539 #endif /* #ifdef QLNX_SOFT_LRO */ 5540 return 0; 5541 5542 err: 5543 qlnx_free_mem_rxq(ha, rxq); 5544 return -ENOMEM; 5545 } 5546 5547 5548 static void 5549 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5550 struct qlnx_tx_queue *txq) 5551 { 5552 struct ecore_dev *cdev; 5553 5554 cdev = &ha->cdev; 5555 5556 bzero((void *)&txq->sw_tx_ring[0], 5557 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5558 5559 /* Free the real RQ ring used by FW */ 5560 if (txq->tx_pbl.p_virt_addr) { 5561 ecore_chain_free(cdev, &txq->tx_pbl); 5562 txq->tx_pbl.p_virt_addr = NULL; 5563 } 5564 return; 5565 } 5566 5567 /* This function allocates all memory needed per Tx queue */ 5568 static int 5569 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5570 struct qlnx_tx_queue *txq) 5571 { 5572 int ret = ECORE_SUCCESS; 5573 union eth_tx_bd_types *p_virt; 5574 struct ecore_dev *cdev; 5575 5576 cdev = &ha->cdev; 5577 5578 bzero((void *)&txq->sw_tx_ring[0], 5579 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5580 5581 /* Allocate the real Tx ring to be used by FW */ 5582 ret = ecore_chain_alloc(cdev, 5583 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5584 ECORE_CHAIN_MODE_PBL, 5585 ECORE_CHAIN_CNT_TYPE_U16, 5586 TX_RING_SIZE, 5587 sizeof(*p_virt), 5588 &txq->tx_pbl, NULL); 5589 5590 if (ret != ECORE_SUCCESS) { 5591 goto err; 5592 } 5593 5594 txq->num_tx_buffers = TX_RING_SIZE; 5595 5596 return 0; 5597 5598 err: 5599 qlnx_free_mem_txq(ha, fp, txq); 5600 return -ENOMEM; 5601 } 5602 5603 static void 5604 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5605 { 5606 struct mbuf *mp; 5607 struct ifnet *ifp = ha->ifp; 5608 5609 if (mtx_initialized(&fp->tx_mtx)) { 5610 5611 if (fp->tx_br != NULL) { 5612 5613 mtx_lock(&fp->tx_mtx); 5614 5615 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 5616 fp->tx_pkts_freed++; 5617 m_freem(mp); 5618 } 5619 5620 mtx_unlock(&fp->tx_mtx); 5621 5622 buf_ring_free(fp->tx_br, M_DEVBUF); 5623 fp->tx_br = NULL; 5624 } 5625 mtx_destroy(&fp->tx_mtx); 5626 } 5627 return; 5628 } 5629 5630 static void 5631 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5632 { 5633 int tc; 5634 5635 qlnx_free_mem_sb(ha, fp->sb_info); 5636 5637 qlnx_free_mem_rxq(ha, fp->rxq); 5638 5639 for (tc = 0; tc < ha->num_tc; tc++) 5640 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 5641 5642 return; 5643 } 5644 5645 static int 5646 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5647 { 5648 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 5649 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 5650 5651 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 5652 5653 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 5654 M_NOWAIT, &fp->tx_mtx); 5655 if (fp->tx_br == NULL) { 5656 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 5657 " fp[%d, %d]\n", ha->dev_unit, fp->rss_id)); 5658 return -ENOMEM; 5659 } 5660 return 0; 
5661 } 5662 5663 static int 5664 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5665 { 5666 int rc, tc; 5667 5668 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 5669 if (rc) 5670 goto err; 5671 5672 if (ha->rx_jumbo_buf_eq_mtu) { 5673 if (ha->max_frame_size <= MCLBYTES) 5674 ha->rx_buf_size = MCLBYTES; 5675 else if (ha->max_frame_size <= MJUMPAGESIZE) 5676 ha->rx_buf_size = MJUMPAGESIZE; 5677 else if (ha->max_frame_size <= MJUM9BYTES) 5678 ha->rx_buf_size = MJUM9BYTES; 5679 else if (ha->max_frame_size <= MJUM16BYTES) 5680 ha->rx_buf_size = MJUM16BYTES; 5681 } else { 5682 if (ha->max_frame_size <= MCLBYTES) 5683 ha->rx_buf_size = MCLBYTES; 5684 else 5685 ha->rx_buf_size = MJUMPAGESIZE; 5686 } 5687 5688 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 5689 if (rc) 5690 goto err; 5691 5692 for (tc = 0; tc < ha->num_tc; tc++) { 5693 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 5694 if (rc) 5695 goto err; 5696 } 5697 5698 return 0; 5699 5700 err: 5701 qlnx_free_mem_fp(ha, fp); 5702 return -ENOMEM; 5703 } 5704 5705 static void 5706 qlnx_free_mem_load(qlnx_host_t *ha) 5707 { 5708 int i; 5709 struct ecore_dev *cdev; 5710 5711 cdev = &ha->cdev; 5712 5713 for (i = 0; i < ha->num_rss; i++) { 5714 struct qlnx_fastpath *fp = &ha->fp_array[i]; 5715 5716 qlnx_free_mem_fp(ha, fp); 5717 } 5718 return; 5719 } 5720 5721 static int 5722 qlnx_alloc_mem_load(qlnx_host_t *ha) 5723 { 5724 int rc = 0, rss_id; 5725 5726 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5727 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5728 5729 rc = qlnx_alloc_mem_fp(ha, fp); 5730 if (rc) 5731 break; 5732 } 5733 return (rc); 5734 } 5735 5736 static int 5737 qlnx_start_vport(struct ecore_dev *cdev, 5738 u8 vport_id, 5739 u16 mtu, 5740 u8 drop_ttl0_flg, 5741 u8 inner_vlan_removal_en_flg, 5742 u8 tx_switching, 5743 u8 hw_lro_enable) 5744 { 5745 int rc, i; 5746 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 5747 qlnx_host_t *ha; 5748 5749 ha = (qlnx_host_t *)cdev; 5750 5751 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 5752 vport_start_params.tx_switching = 0; 5753 vport_start_params.handle_ptp_pkts = 0; 5754 vport_start_params.only_untagged = 0; 5755 vport_start_params.drop_ttl0 = drop_ttl0_flg; 5756 5757 vport_start_params.tpa_mode = 5758 (hw_lro_enable ? 
ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 5759 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 5760 5761 vport_start_params.vport_id = vport_id; 5762 vport_start_params.mtu = mtu; 5763 5764 5765 QL_DPRINT2(ha, (ha->pci_dev, "%s: setting mtu to %d\n", __func__, mtu)); 5766 5767 for_each_hwfn(cdev, i) { 5768 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5769 5770 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 5771 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5772 5773 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 5774 5775 if (rc) { 5776 QL_DPRINT1(ha, (ha->pci_dev, 5777 "%s: Failed to start VPORT V-PORT %d " 5778 "with MTU %d\n", __func__, vport_id, mtu)); 5779 return -ENOMEM; 5780 } 5781 5782 ecore_hw_start_fastpath(p_hwfn); 5783 5784 QL_DPRINT2(ha, (ha->pci_dev, 5785 "%s: Started V-PORT %d with MTU %d\n", 5786 __func__, vport_id, mtu)); 5787 } 5788 return 0; 5789 } 5790 5791 5792 static int 5793 qlnx_update_vport(struct ecore_dev *cdev, 5794 struct qlnx_update_vport_params *params) 5795 { 5796 struct ecore_sp_vport_update_params sp_params; 5797 int rc, i, j, fp_index; 5798 struct ecore_hwfn *p_hwfn; 5799 struct ecore_rss_params *rss; 5800 qlnx_host_t *ha = (qlnx_host_t *)cdev; 5801 struct qlnx_fastpath *fp; 5802 5803 memset(&sp_params, 0, sizeof(sp_params)); 5804 /* Translate protocol params into sp params */ 5805 sp_params.vport_id = params->vport_id; 5806 5807 sp_params.update_vport_active_rx_flg = 5808 params->update_vport_active_rx_flg; 5809 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 5810 5811 sp_params.update_vport_active_tx_flg = 5812 params->update_vport_active_tx_flg; 5813 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 5814 5815 sp_params.update_inner_vlan_removal_flg = 5816 params->update_inner_vlan_removal_flg; 5817 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 5818 5819 sp_params.sge_tpa_params = params->sge_tpa_params; 5820 5821 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 5822 * We need to re-fix the rss values per engine for CMT. 
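	 * For example, on a CMT device with two engines, indirection table
	 * entry j on engine i is re-pointed at the Rx queue of fastpath
	 * ((num_hwfns * j) + i) % num_rss, which is what the loop below
	 * computes.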
5823 */ 5824 5825 sp_params.rss_params = params->rss_params; 5826 5827 for_each_hwfn(cdev, i) { 5828 5829 p_hwfn = &cdev->hwfns[i]; 5830 5831 if ((cdev->num_hwfns > 1) && 5832 params->rss_params->update_rss_config && 5833 params->rss_params->rss_enable) { 5834 5835 rss = params->rss_params; 5836 5837 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 5838 5839 fp_index = ((cdev->num_hwfns * j) + i) % 5840 ha->num_rss; 5841 5842 fp = &ha->fp_array[fp_index]; 5843 rss->rss_ind_table[j] = fp->rxq->handle; 5844 } 5845 5846 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 5847 QL_DPRINT3(ha, (ha->pci_dev, 5848 "%p %p %p %p %p %p %p %p \n", 5849 rss->rss_ind_table[j], 5850 rss->rss_ind_table[j+1], 5851 rss->rss_ind_table[j+2], 5852 rss->rss_ind_table[j+3], 5853 rss->rss_ind_table[j+4], 5854 rss->rss_ind_table[j+5], 5855 rss->rss_ind_table[j+6], 5856 rss->rss_ind_table[j+7])); 5857 j += 8; 5858 } 5859 } 5860 5861 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5862 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 5863 ECORE_SPQ_MODE_EBLOCK, NULL); 5864 if (rc) { 5865 QL_DPRINT1(ha, (ha->pci_dev, 5866 "%s:Failed to update VPORT\n", __func__)); 5867 return rc; 5868 } 5869 5870 QL_DPRINT2(ha, (ha->pci_dev, 5871 "%s: Updated V-PORT %d: tx_active_flag %d," 5872 "rx_active_flag %d [tx_update %d], [rx_update %d]\n", 5873 __func__, 5874 params->vport_id, params->vport_active_tx_flg, 5875 params->vport_active_rx_flg, 5876 params->update_vport_active_tx_flg, 5877 params->update_vport_active_rx_flg)); 5878 } 5879 5880 return 0; 5881 } 5882 5883 static void 5884 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 5885 { 5886 struct eth_rx_bd *rx_bd_cons = 5887 ecore_chain_consume(&rxq->rx_bd_ring); 5888 struct eth_rx_bd *rx_bd_prod = 5889 ecore_chain_produce(&rxq->rx_bd_ring); 5890 struct sw_rx_data *sw_rx_data_cons = 5891 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 5892 struct sw_rx_data *sw_rx_data_prod = 5893 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5894 5895 sw_rx_data_prod->data = sw_rx_data_cons->data; 5896 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 5897 5898 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 5899 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5900 5901 return; 5902 } 5903 5904 static void 5905 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 5906 { 5907 5908 uint16_t bd_prod; 5909 uint16_t cqe_prod; 5910 union { 5911 struct eth_rx_prod_data rx_prod_data; 5912 uint32_t data32; 5913 } rx_prods; 5914 5915 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 5916 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 5917 5918 /* Update producers */ 5919 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 5920 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 5921 5922 /* Make sure that the BD and SGE data is updated before updating the 5923 * producers since FW might read the BD/SGE right after the producer 5924 * is updated. 5925 */ 5926 wmb(); 5927 5928 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 5929 sizeof(rx_prods), &rx_prods.data32); 5930 5931 /* mmiowb is needed to synchronize doorbell writes from more than one 5932 * processor. It guarantees that the write arrives to the device before 5933 * the napi lock is released and another qlnx_poll is called (possibly 5934 * on another CPU). Without this barrier, the next doorbell can bypass 5935 * this doorbell. This is applicable to IA64/Altix systems. 
5936 */ 5937 wmb(); 5938 5939 return; 5940 } 5941 5942 static uint32_t qlnx_hash_key[] = { 5943 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 5944 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 5945 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 5946 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 5947 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 5948 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 5949 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 5950 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 5951 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 5952 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 5953 5954 static int 5955 qlnx_start_queues(qlnx_host_t *ha) 5956 { 5957 int rc, tc, i, vport_id = 0, 5958 drop_ttl0_flg = 1, vlan_removal_en = 1, 5959 tx_switching = 0, hw_lro_enable = 0; 5960 struct ecore_dev *cdev = &ha->cdev; 5961 struct ecore_rss_params *rss_params = &ha->rss_params; 5962 struct qlnx_update_vport_params vport_update_params; 5963 struct ifnet *ifp; 5964 struct ecore_hwfn *p_hwfn; 5965 struct ecore_sge_tpa_params tpa_params; 5966 struct ecore_queue_start_common_params qparams; 5967 struct qlnx_fastpath *fp; 5968 5969 ifp = ha->ifp; 5970 5971 if (!ha->num_rss) { 5972 QL_DPRINT1(ha, (ha->pci_dev, 5973 "%s: Cannot update V-VPORT as active as there" 5974 " are no Rx queues\n", __func__)); 5975 return -EINVAL; 5976 } 5977 5978 #ifndef QLNX_SOFT_LRO 5979 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 5980 #endif /* #ifndef QLNX_SOFT_LRO */ 5981 5982 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 5983 vlan_removal_en, tx_switching, hw_lro_enable); 5984 5985 if (rc) { 5986 QL_DPRINT1(ha, (ha->pci_dev, 5987 "%s: Start V-PORT failed %d\n", __func__, rc)); 5988 return rc; 5989 } 5990 5991 QL_DPRINT2(ha, (ha->pci_dev, 5992 "%s: Start vport ramrod passed," 5993 " vport_id = %d, MTU = %d, vlan_removal_en = %d\n", __func__, 5994 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en)); 5995 5996 for_each_rss(i) { 5997 struct ecore_rxq_start_ret_params rx_ret_params; 5998 struct ecore_txq_start_ret_params tx_ret_params; 5999 6000 fp = &ha->fp_array[i]; 6001 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6002 6003 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6004 bzero(&rx_ret_params, 6005 sizeof (struct ecore_rxq_start_ret_params)); 6006 6007 qparams.queue_id = i ; 6008 qparams.vport_id = vport_id; 6009 qparams.stats_id = vport_id; 6010 qparams.p_sb = fp->sb_info; 6011 qparams.sb_idx = RX_PI; 6012 6013 6014 rc = ecore_eth_rx_queue_start(p_hwfn, 6015 p_hwfn->hw_info.opaque_fid, 6016 &qparams, 6017 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6018 /* bd_chain_phys_addr */ 6019 fp->rxq->rx_bd_ring.p_phys_addr, 6020 /* cqe_pbl_addr */ 6021 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6022 /* cqe_pbl_size */ 6023 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6024 &rx_ret_params); 6025 6026 if (rc) { 6027 QL_DPRINT1(ha, (ha->pci_dev, 6028 "%s: Start RXQ #%d failed %d\n", __func__, 6029 i, rc)); 6030 return rc; 6031 } 6032 6033 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6034 fp->rxq->handle = rx_ret_params.p_handle; 6035 fp->rxq->hw_cons_ptr = 6036 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6037 6038 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6039 6040 for (tc = 0; tc < ha->num_tc; tc++) { 6041 struct qlnx_tx_queue *txq = fp->txq[tc]; 6042 6043 bzero(&qparams, 6044 sizeof(struct ecore_queue_start_common_params)); 6045 bzero(&tx_ret_params, 6046 sizeof (struct ecore_txq_start_ret_params)); 6047 6048 qparams.queue_id = txq->index / cdev->num_hwfns ; 6049 
qparams.vport_id = vport_id; 6050 qparams.stats_id = vport_id; 6051 qparams.p_sb = fp->sb_info; 6052 qparams.sb_idx = TX_PI(tc); 6053 6054 rc = ecore_eth_tx_queue_start(p_hwfn, 6055 p_hwfn->hw_info.opaque_fid, 6056 &qparams, tc, 6057 /* bd_chain_phys_addr */ 6058 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6059 ecore_chain_get_page_cnt(&txq->tx_pbl), 6060 &tx_ret_params); 6061 6062 if (rc) { 6063 QL_DPRINT1(ha, (ha->pci_dev, 6064 "%s: Start TXQ #%d failed %d\n", 6065 __func__, txq->index, rc)); 6066 return rc; 6067 } 6068 6069 txq->doorbell_addr = tx_ret_params.p_doorbell; 6070 txq->handle = tx_ret_params.p_handle; 6071 6072 txq->hw_cons_ptr = 6073 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6074 SET_FIELD(txq->tx_db.data.params, 6075 ETH_DB_DATA_DEST, DB_DEST_XCM); 6076 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6077 DB_AGG_CMD_SET); 6078 SET_FIELD(txq->tx_db.data.params, 6079 ETH_DB_DATA_AGG_VAL_SEL, 6080 DQ_XCM_ETH_TX_BD_PROD_CMD); 6081 6082 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6083 } 6084 } 6085 6086 /* Fill struct with RSS params */ 6087 if (ha->num_rss > 1) { 6088 6089 rss_params->update_rss_config = 1; 6090 rss_params->rss_enable = 1; 6091 rss_params->update_rss_capabilities = 1; 6092 rss_params->update_rss_ind_table = 1; 6093 rss_params->update_rss_key = 1; 6094 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6095 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6096 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6097 6098 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6099 fp = &ha->fp_array[(i % ha->num_rss)]; 6100 rss_params->rss_ind_table[i] = fp->rxq->handle; 6101 } 6102 6103 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6104 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6105 6106 } else { 6107 memset(rss_params, 0, sizeof(*rss_params)); 6108 } 6109 6110 6111 /* Prepare and send the vport enable */ 6112 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6113 vport_update_params.vport_id = vport_id; 6114 vport_update_params.update_vport_active_tx_flg = 1; 6115 vport_update_params.vport_active_tx_flg = 1; 6116 vport_update_params.update_vport_active_rx_flg = 1; 6117 vport_update_params.vport_active_rx_flg = 1; 6118 vport_update_params.rss_params = rss_params; 6119 vport_update_params.update_inner_vlan_removal_flg = 1; 6120 vport_update_params.inner_vlan_removal_flg = 1; 6121 6122 if (hw_lro_enable) { 6123 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6124 6125 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6126 6127 tpa_params.update_tpa_en_flg = 1; 6128 tpa_params.tpa_ipv4_en_flg = 1; 6129 tpa_params.tpa_ipv6_en_flg = 1; 6130 6131 tpa_params.update_tpa_param_flg = 1; 6132 tpa_params.tpa_pkt_split_flg = 0; 6133 tpa_params.tpa_hdr_data_split_flg = 0; 6134 tpa_params.tpa_gro_consistent_flg = 0; 6135 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6136 tpa_params.tpa_max_size = (uint16_t)(-1); 6137 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6138 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6139 6140 vport_update_params.sge_tpa_params = &tpa_params; 6141 } 6142 6143 rc = qlnx_update_vport(cdev, &vport_update_params); 6144 if (rc) { 6145 QL_DPRINT1(ha, (ha->pci_dev, 6146 "%s: Update V-PORT failed %d\n", __func__, rc)); 6147 return rc; 6148 } 6149 6150 return 0; 6151 } 6152 6153 static int 6154 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6155 struct qlnx_tx_queue *txq) 6156 { 6157 uint16_t hw_bd_cons; 6158 uint16_t ecore_cons_idx; 6159 6160 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", 
__func__)); 6161 6162 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6163 6164 while (hw_bd_cons != 6165 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6166 6167 mtx_lock(&fp->tx_mtx); 6168 6169 (void)qlnx_tx_int(ha, fp, txq); 6170 6171 mtx_unlock(&fp->tx_mtx); 6172 6173 qlnx_mdelay(__func__, 2); 6174 6175 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6176 } 6177 6178 QL_DPRINT2(ha, (ha->pci_dev, "%s[%d, %d]: done\n", __func__, 6179 fp->rss_id, txq->index)); 6180 6181 return 0; 6182 } 6183 6184 static int 6185 qlnx_stop_queues(qlnx_host_t *ha) 6186 { 6187 struct qlnx_update_vport_params vport_update_params; 6188 struct ecore_dev *cdev; 6189 struct qlnx_fastpath *fp; 6190 int rc, tc, i; 6191 6192 cdev = &ha->cdev; 6193 6194 /* Disable the vport */ 6195 6196 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6197 6198 vport_update_params.vport_id = 0; 6199 vport_update_params.update_vport_active_tx_flg = 1; 6200 vport_update_params.vport_active_tx_flg = 0; 6201 vport_update_params.update_vport_active_rx_flg = 1; 6202 vport_update_params.vport_active_rx_flg = 0; 6203 vport_update_params.rss_params = &ha->rss_params; 6204 vport_update_params.rss_params->update_rss_config = 0; 6205 vport_update_params.rss_params->rss_enable = 0; 6206 vport_update_params.update_inner_vlan_removal_flg = 0; 6207 vport_update_params.inner_vlan_removal_flg = 0; 6208 6209 rc = qlnx_update_vport(cdev, &vport_update_params); 6210 if (rc) { 6211 QL_DPRINT1(ha, (ha->pci_dev, "%s:Failed to update vport\n", 6212 __func__)); 6213 return rc; 6214 } 6215 6216 /* Flush Tx queues. If needed, request drain from MCP */ 6217 for_each_rss(i) { 6218 fp = &ha->fp_array[i]; 6219 6220 for (tc = 0; tc < ha->num_tc; tc++) { 6221 struct qlnx_tx_queue *txq = fp->txq[tc]; 6222 6223 rc = qlnx_drain_txq(ha, fp, txq); 6224 if (rc) 6225 return rc; 6226 } 6227 } 6228 6229 /* Stop all Queues in reverse order*/ 6230 for (i = ha->num_rss - 1; i >= 0; i--) { 6231 6232 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6233 6234 fp = &ha->fp_array[i]; 6235 6236 /* Stop the Tx Queue(s)*/ 6237 for (tc = 0; tc < ha->num_tc; tc++) { 6238 int tx_queue_id; 6239 6240 tx_queue_id = tc * ha->num_rss + i; 6241 rc = ecore_eth_tx_queue_stop(p_hwfn, 6242 fp->txq[tc]->handle); 6243 6244 if (rc) { 6245 QL_DPRINT1(ha, (ha->pci_dev, 6246 "%s: Failed to stop TXQ #%d\n", 6247 __func__, tx_queue_id)); 6248 return rc; 6249 } 6250 } 6251 6252 /* Stop the Rx Queue*/ 6253 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6254 false); 6255 if (rc) { 6256 QL_DPRINT1(ha, (ha->pci_dev, 6257 "%s: Failed to stop RXQ #%d\n", __func__, i)); 6258 return rc; 6259 } 6260 } 6261 6262 /* Stop the vport */ 6263 for_each_hwfn(cdev, i) { 6264 6265 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6266 6267 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6268 6269 if (rc) { 6270 QL_DPRINT1(ha, (ha->pci_dev, 6271 "%s: Failed to stop VPORT\n", __func__)); 6272 return rc; 6273 } 6274 } 6275 6276 return rc; 6277 } 6278 6279 static int 6280 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6281 enum ecore_filter_opcode opcode, 6282 unsigned char mac[ETH_ALEN]) 6283 { 6284 struct ecore_filter_ucast ucast; 6285 struct ecore_dev *cdev; 6286 int rc; 6287 6288 cdev = &ha->cdev; 6289 6290 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6291 6292 ucast.opcode = opcode; 6293 ucast.type = ECORE_FILTER_MAC; 6294 ucast.is_rx_filter = 1; 6295 ucast.vport_to_add_to = 0; 6296 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6297 6298 rc = ecore_filter_ucast_cmd(cdev, &ucast, 
ECORE_SPQ_MODE_CB, NULL); 6299 6300 return (rc); 6301 } 6302 6303 static int 6304 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6305 { 6306 struct ecore_filter_ucast ucast; 6307 struct ecore_dev *cdev; 6308 int rc; 6309 6310 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6311 6312 ucast.opcode = ECORE_FILTER_REPLACE; 6313 ucast.type = ECORE_FILTER_MAC; 6314 ucast.is_rx_filter = 1; 6315 6316 cdev = &ha->cdev; 6317 6318 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6319 6320 return (rc); 6321 } 6322 6323 static int 6324 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6325 { 6326 struct ecore_filter_mcast *mcast; 6327 struct ecore_dev *cdev; 6328 int rc, i; 6329 6330 cdev = &ha->cdev; 6331 6332 mcast = &ha->ecore_mcast; 6333 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6334 6335 mcast->opcode = ECORE_FILTER_REMOVE; 6336 6337 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 6338 6339 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 6340 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 6341 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 6342 6343 memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN); 6344 mcast->num_mc_addrs++; 6345 } 6346 } 6347 mcast = &ha->ecore_mcast; 6348 6349 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 6350 6351 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 6352 ha->nmcast = 0; 6353 6354 return (rc); 6355 } 6356 6357 static int 6358 qlnx_clean_filters(qlnx_host_t *ha) 6359 { 6360 int rc = 0; 6361 6362 /* Remove all unicast macs */ 6363 rc = qlnx_remove_all_ucast_mac(ha); 6364 if (rc) 6365 return rc; 6366 6367 /* Remove all multicast macs */ 6368 rc = qlnx_remove_all_mcast_mac(ha); 6369 if (rc) 6370 return rc; 6371 6372 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 6373 6374 return (rc); 6375 } 6376 6377 static int 6378 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 6379 { 6380 struct ecore_filter_accept_flags accept; 6381 int rc = 0; 6382 struct ecore_dev *cdev; 6383 6384 cdev = &ha->cdev; 6385 6386 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 6387 6388 accept.update_rx_mode_config = 1; 6389 accept.rx_accept_filter = filter; 6390 6391 accept.update_tx_mode_config = 1; 6392 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 6393 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 6394 6395 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 6396 ECORE_SPQ_MODE_CB, NULL); 6397 6398 return (rc); 6399 } 6400 6401 static int 6402 qlnx_set_rx_mode(qlnx_host_t *ha) 6403 { 6404 int rc = 0; 6405 uint8_t filter; 6406 6407 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 6408 if (rc) 6409 return rc; 6410 6411 rc = qlnx_remove_all_mcast_mac(ha); 6412 if (rc) 6413 return rc; 6414 6415 filter = ECORE_ACCEPT_UCAST_MATCHED | 6416 ECORE_ACCEPT_MCAST_MATCHED | 6417 ECORE_ACCEPT_BCAST; 6418 ha->filter = filter; 6419 6420 rc = qlnx_set_rx_accept_filter(ha, filter); 6421 6422 return (rc); 6423 } 6424 6425 static int 6426 qlnx_set_link(qlnx_host_t *ha, bool link_up) 6427 { 6428 int i, rc = 0; 6429 struct ecore_dev *cdev; 6430 struct ecore_hwfn *hwfn; 6431 struct ecore_ptt *ptt; 6432 6433 cdev = &ha->cdev; 6434 6435 for_each_hwfn(cdev, i) { 6436 6437 hwfn = &cdev->hwfns[i]; 6438 6439 ptt = ecore_ptt_acquire(hwfn); 6440 if (!ptt) 6441 return -EBUSY; 6442 6443 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 6444 6445 ecore_ptt_release(hwfn, ptt); 6446 6447 if (rc) 6448 return rc; 6449 } 6450 return (rc); 6451 } 6452 6453 #if __FreeBSD_version 
#if __FreeBSD_version >= 1100000
static uint64_t
qlnx_get_counter(if_t ifp, ift_counter cnt)
{
	qlnx_host_t	*ha;
	uint64_t	count;

	ha = (qlnx_host_t *)if_getsoftc(ifp);

	switch (cnt) {

	case IFCOUNTER_IPACKETS:
		count = ha->hw_stats.common.rx_ucast_pkts +
			ha->hw_stats.common.rx_mcast_pkts +
			ha->hw_stats.common.rx_bcast_pkts;
		break;

	case IFCOUNTER_IERRORS:
		count = ha->hw_stats.common.rx_crc_errors +
			ha->hw_stats.common.rx_align_errors +
			ha->hw_stats.common.rx_oversize_packets +
			ha->hw_stats.common.rx_undersize_packets;
		break;

	case IFCOUNTER_OPACKETS:
		count = ha->hw_stats.common.tx_ucast_pkts +
			ha->hw_stats.common.tx_mcast_pkts +
			ha->hw_stats.common.tx_bcast_pkts;
		break;

	case IFCOUNTER_OERRORS:
		count = ha->hw_stats.common.tx_err_drop_pkts;
		break;

	case IFCOUNTER_COLLISIONS:
		return (0);

	case IFCOUNTER_IBYTES:
		count = ha->hw_stats.common.rx_ucast_bytes +
			ha->hw_stats.common.rx_mcast_bytes +
			ha->hw_stats.common.rx_bcast_bytes;
		break;

	case IFCOUNTER_OBYTES:
		count = ha->hw_stats.common.tx_ucast_bytes +
			ha->hw_stats.common.tx_mcast_bytes +
			ha->hw_stats.common.tx_bcast_bytes;
		break;

	/* IMCASTS/OMCASTS are packet counters; report the *_pkts stats. */
	case IFCOUNTER_IMCASTS:
		count = ha->hw_stats.common.rx_mcast_pkts;
		break;

	case IFCOUNTER_OMCASTS:
		count = ha->hw_stats.common.tx_mcast_pkts;
		break;

	case IFCOUNTER_IQDROPS:
	case IFCOUNTER_OQDROPS:
	case IFCOUNTER_NOPROTO:
	default:
		return (if_get_counter_default(ifp, cnt));
	}
	return (count);
}
#endif


static void
qlnx_timer(void *arg)
{
	qlnx_host_t	*ha;

	ha = (qlnx_host_t *)arg;

	ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);

	if (ha->storm_stats_enable)
		qlnx_sample_storm_stats(ha);

	callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	return;
}

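/*
 * qlnx_load() brings the port up:
 *
 *   1. allocate the fastpath arrays and the per-queue memory,
 *   2. set up and CPU-bind one interrupt per RSS queue,
 *   3. start the VPORT together with its RX and TX queues,
 *   4. program the primary MAC and the RX accept filters,
 *   5. request link-up and arm the one-second statistics callout.
 *
 * On failure, the steps already completed are unwound through the
 * qlnx_load_exit* labels below.
 */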
static int
qlnx_load(qlnx_host_t *ha)
{
	int			i;
	int			rc = 0;
	struct ecore_dev	*cdev;
	device_t		dev;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

	rc = qlnx_alloc_mem_arrays(ha);
	if (rc)
		goto qlnx_load_exit0;

	qlnx_init_fp(ha);

	rc = qlnx_alloc_mem_load(ha);
	if (rc)
		goto qlnx_load_exit1;

	QL_DPRINT2(ha, (dev, "%s: Allocated %d RSS queues on %d TC/s\n",
		__func__, ha->num_rss, ha->num_tc));

	for (i = 0; i < ha->num_rss; i++) {

		if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
				(INTR_TYPE_NET | INTR_MPSAFE),
				NULL, qlnx_fp_isr, &ha->irq_vec[i],
				&ha->irq_vec[i].handle))) {

			QL_DPRINT1(ha, (dev, "could not setup interrupt\n"));

			goto qlnx_load_exit2;
		}

		QL_DPRINT2(ha, (dev, "%s: rss_id = %d irq_rid %d"
			" irq %p handle %p\n", __func__, i,
			ha->irq_vec[i].irq_rid,
			ha->irq_vec[i].irq, ha->irq_vec[i].handle));

		bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
	}

	rc = qlnx_start_queues(ha);
	if (rc)
		goto qlnx_load_exit2;

	QL_DPRINT2(ha, (dev, "%s: Start VPORT, RXQ and TXQ succeeded\n",
		__func__));

	/* Add primary mac and set Rx filters */
	rc = qlnx_set_rx_mode(ha);
	if (rc)
		goto qlnx_load_exit2;

	/* Ask for link-up using current configuration */
	qlnx_set_link(ha, true);

	ha->state = QLNX_STATE_OPEN;

	bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

	if (ha->flags.callout_init)
		callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

	goto qlnx_load_exit0;

qlnx_load_exit2:
	qlnx_free_mem_load(ha);

qlnx_load_exit1:
	ha->num_rss = 0;

qlnx_load_exit0:
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit [%d]\n", __func__, rc));
	return (rc);
}

static void
qlnx_drain_soft_lro(qlnx_host_t *ha)
{
#ifdef QLNX_SOFT_LRO

	struct ifnet	*ifp;
	int		i;

	ifp = ha->ifp;

	if (ifp->if_capenable & IFCAP_LRO) {

		for (i = 0; i < ha->num_rss; i++) {

			struct qlnx_fastpath	*fp = &ha->fp_array[i];
			struct lro_ctrl		*lro;

			lro = &fp->rxq->lro;

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

			tcp_lro_flush_all(lro);

#else
			struct lro_entry *queued;

			while (!SLIST_EMPTY(&lro->lro_active)) {
				queued = SLIST_FIRST(&lro->lro_active);
				SLIST_REMOVE_HEAD(&lro->lro_active, next);
				tcp_lro_flush(lro, queued);
			}

#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */

		}
	}

#endif /* #ifdef QLNX_SOFT_LRO */

	return;
}

static void
qlnx_unload(qlnx_host_t *ha)
{
	struct ecore_dev	*cdev;
	device_t		dev;
	int			i;

	cdev = &ha->cdev;
	dev = ha->pci_dev;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	if (ha->state == QLNX_STATE_OPEN) {

		qlnx_set_link(ha, false);
		qlnx_clean_filters(ha);
		qlnx_stop_queues(ha);
		ecore_hw_stop_fastpath(cdev);

		for (i = 0; i < ha->num_rss; i++) {
			if (ha->irq_vec[i].handle) {
				(void)bus_teardown_intr(dev,
					ha->irq_vec[i].irq,
					ha->irq_vec[i].handle);
				ha->irq_vec[i].handle = NULL;
			}
		}

		qlnx_drain_fp_taskqueues(ha);
		qlnx_drain_soft_lro(ha);
		qlnx_free_mem_load(ha);
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	qlnx_mdelay(__func__, 1000);

	ha->state = QLNX_STATE_CLOSED;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));
	return;
}

static int
qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	int			rval = -1;
	struct ecore_hwfn	*p_hwfn;
	struct ecore_ptt	*p_ptt;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
			__func__));
		return (rval);
	}

	rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

	if (rval == DBG_STATUS_OK)
		rval = 0;
	else {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_dbg_grc_get_dump_buf_size failed [0x%x]\n",
			__func__, rval));
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}

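/*
 * qlnx_grc_dumpsize() above and qlnx_idle_chk_size() below only ask the
 * ecore debug facility how many dwords a GRC dump or an idle-check
 * capture of one hwfn requires; the caller is expected to allocate
 * (*num_dwords * sizeof(uint32_t)) bytes before collecting the dump.
 * A minimal sketch of such a caller (buffer handling only; the ecore
 * entry point that actually fills the buffer is not shown here):
 *
 *	uint32_t dwords = 0;
 *	void *buf;
 *
 *	if (qlnx_grc_dumpsize(ha, &dwords, hwfn_index) != 0)
 *		return;
 *	buf = malloc(dwords * sizeof(uint32_t), M_QLNXBUF, M_NOWAIT);
 *	if (buf == NULL)
 *		return;
 *	... collect the dump into buf, then free(buf, M_QLNXBUF) ...
 */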
static int
qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
	int			rval = -1;
	struct ecore_hwfn	*p_hwfn;
	struct ecore_ptt	*p_ptt;

	ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

	p_hwfn = &ha->cdev.hwfns[hwfn_index];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (!p_ptt) {
		QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n",
			__func__));
		return (rval);
	}

	rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

	if (rval == DBG_STATUS_OK)
		rval = 0;
	else {
		QL_DPRINT1(ha, (ha->pci_dev, "%s : "
			"ecore_dbg_idle_chk_get_dump_buf_size failed [0x%x]\n",
			__func__, rval));
	}

	ecore_ptt_release(p_hwfn, p_ptt);

	return (rval);
}


static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
	int			i, index;
	struct ecore_dev	*cdev;
	qlnx_storm_stats_t	*s_stats;
	uint32_t		reg;
	struct ecore_ptt	*p_ptt;
	struct ecore_hwfn	*hwfn;

	if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
		ha->storm_stats_enable = 0;
		return;
	}

	cdev = &ha->cdev;

	for_each_hwfn(cdev, i) {

		hwfn = &cdev->hwfns[i];

		p_ptt = ecore_ptt_acquire(hwfn);
		if (!p_ptt)
			return;

		index = ha->storm_stats_index +
				(i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);

		s_stats = &ha->storm_stats[index];

		/*
		 * Read the active/stall/sleeping/inactive cycle counters
		 * of each storm processor (X/Y/P/T/M/U) on this hwfn.
		 */

		/* XSTORM */
		reg = XSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = XSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* YSTORM */
		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = YSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* PSTORM */
		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = PSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* TSTORM */
		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = TSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* MSTORM */
		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = MSEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		/* USTORM */
		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
		s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
		s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

		reg = USEM_REG_FAST_MEMORY +
			SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
		s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

		ecore_ptt_release(hwfn, p_ptt);
	}

	ha->storm_stats_index++;

	return;
}

/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
	device_t	dev;
	uint32_t	i = 0;
	uint8_t		*buf;

	dev = ha->pci_dev;
	buf = dbuf;

	device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

	while (len >= 16) {
		device_printf(dev, "0x%08x:"
			" %02x %02x %02x %02x %02x %02x %02x %02x"
			" %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
			buf[0], buf[1], buf[2], buf[3],
			buf[4], buf[5], buf[6], buf[7],
			buf[8], buf[9], buf[10], buf[11],
			buf[12], buf[13], buf[14], buf[15]);
		i += 16;
		len -= 16;
		buf += 16;
	}
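	/* Print any trailing (1 to 15) bytes on a final line. */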
	if (len) {
		char		line[64];
		uint32_t	j, n = 0;

		for (j = 0; j < len; j++)
			n += snprintf(&line[n], sizeof(line) - n,
				" %02x", buf[j]);

		device_printf(dev, "0x%08x:%s\n", i, line);
	}

	device_printf(dev, "%s: %s dump end\n", __func__, msg);

	return;
}

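/*
 * Example (illustrative only): dumping the leading bytes of a received
 * packet while debugging, assuming "mp" points to a valid mbuf:
 *
 *	qlnx_dump_buf8(ha, "rx data", mtod(mp, void *), mp->m_len);
 */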