/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
		struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
		struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
		char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
		uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
		uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
		struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
		int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
		struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
		int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
		struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);

static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[64];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */
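/*
 * Note: each vendor/device ID below is wrapped in #ifndef, presumably so
 * that a definition supplied by the build environment (if any) takes
 * precedence. The default values correspond to the 25G/40G/50G/100G
 * QLE45xxx adapters recognized by qlnx_valid_device() and qlnx_pci_probe().
 */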
#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

static int
qlnx_valid_device(device_t dev)
{
	uint16_t	device_id;

	device_id = pci_get_device(dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654))
		return 0;

	return -1;
}

/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLogic QLE45xxx device
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {

	case QLOGIC_PCI_DEVICE_ID_1644:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	default:
		return (ENXIO);
	}

	return (BUS_PROBE_DEFAULT);
}

static void
qlnx_sp_intr(void *arg)
{
	struct ecore_hwfn	*p_hwfn;
	qlnx_host_t		*ha;
	int			i;

	p_hwfn = arg;

	if (p_hwfn == NULL) {
		printf("%s: spurious slowpath intr\n", __func__);
		return;
	}

	ha = (qlnx_host_t *)p_hwfn->p_dev;

	QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__));

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (&ha->cdev.hwfns[i] == p_hwfn) {
			taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
			break;
		}
	}
	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__));

	return;
}

static void
qlnx_sp_taskqueue(void *context, int pending)
{
	struct ecore_hwfn	*p_hwfn;

	p_hwfn = context;

	if (p_hwfn != NULL) {
		qlnx_sp_isr(p_hwfn);
	}
	return;
}
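/*
 * Slow path interrupt flow: qlnx_sp_intr() runs in interrupt context and
 * only enqueues the per-hwfn task; qlnx_sp_taskqueue() then invokes
 * qlnx_sp_isr() from the taskqueue thread. The taskqueues themselves (one
 * fast taskqueue per hardware function, named "ql_sp_tq_<n>") are created
 * below.
 */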
static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

		TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

		ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
			taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

		if (ha->sp_taskqueue[i] == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
			ha->sp_taskqueue[i]));
	}

	return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
	int	i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_taskqueue[i] != NULL) {
			taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
			taskqueue_free(ha->sp_taskqueue[i]);
		}
	}
	return;
}

static void
qlnx_fp_taskqueue(void *context, int pending)
{
	struct qlnx_fastpath	*fp;
	qlnx_host_t		*ha;
	struct ifnet		*ifp;
	struct mbuf		*mp;
	int			ret = 0;

	fp = context;

	if (fp == NULL)
		return;

	ha = (qlnx_host_t *)fp->edev;

	ifp = ha->ifp;

	mtx_lock(&fp->tx_mtx);

	if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
		IFF_DRV_RUNNING) || (!ha->link_up)) {

		mtx_unlock(&fp->tx_mtx);
		goto qlnx_fp_taskqueue_exit;
	}

	(void)qlnx_tx_int(ha, fp, fp->txq[0]);

	mp = drbr_peek(ifp, fp->tx_br);

	while (mp != NULL) {

		ret = qlnx_send(ha, fp, &mp);

		if (ret) {

			if (mp != NULL) {
				drbr_putback(ifp, fp->tx_br, mp);
			} else {
				fp->tx_pkts_processed++;
				drbr_advance(ifp, fp->tx_br);
			}

			mtx_unlock(&fp->tx_mtx);

			goto qlnx_fp_taskqueue_exit;

		} else {
			drbr_advance(ifp, fp->tx_br);
			fp->tx_pkts_transmitted++;
			fp->tx_pkts_processed++;
		}

		mp = drbr_peek(ifp, fp->tx_br);
	}

	(void)qlnx_tx_int(ha, fp, fp->txq[0]);

	mtx_unlock(&fp->tx_mtx);

qlnx_fp_taskqueue_exit:

	QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret));
	return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
	int	i;
	uint8_t	tq_name[32];
	struct qlnx_fastpath *fp;

	for (i = 0; i < ha->num_rss; i++) {

		fp = &ha->fp_array[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

		TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

		fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
					taskqueue_thread_enqueue,
					&fp->fp_taskqueue);

		if (fp->fp_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
			tq_name);

		QL_DPRINT1(ha, (ha->pci_dev, "%s: %p\n", __func__,
			fp->fp_taskqueue));
	}

	return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	struct qlnx_fastpath	*fp;

	for (i = 0; i < ha->num_rss; i++) {

		fp = &ha->fp_array[i];

		if (fp->fp_taskqueue != NULL) {

			taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
			taskqueue_free(fp->fp_taskqueue);
			fp->fp_taskqueue = NULL;
		}
	}
	return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
	int			i;
	struct qlnx_fastpath	*fp;

	for (i =
0; i < ha->num_rss; i++) { 504 fp = &ha->fp_array[i]; 505 506 if (fp->fp_taskqueue != NULL) { 507 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 508 } 509 } 510 return; 511 } 512 513 /* 514 * Name: qlnx_pci_attach 515 * Function: attaches the device to the operating system 516 */ 517 static int 518 qlnx_pci_attach(device_t dev) 519 { 520 qlnx_host_t *ha = NULL; 521 uint32_t rsrc_len_reg = 0; 522 uint32_t rsrc_len_dbells = 0; 523 uint32_t rsrc_len_msix = 0; 524 int i; 525 uint32_t mfw_ver; 526 527 if ((ha = device_get_softc(dev)) == NULL) { 528 device_printf(dev, "cannot get softc\n"); 529 return (ENOMEM); 530 } 531 532 memset(ha, 0, sizeof (qlnx_host_t)); 533 534 if (qlnx_valid_device(dev) != 0) { 535 device_printf(dev, "device is not valid device\n"); 536 return (ENXIO); 537 } 538 ha->pci_func = pci_get_function(dev); 539 540 ha->pci_dev = dev; 541 542 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 543 mtx_init(&ha->tx_lock, "qlnx_tx_lock", MTX_NETWORK_LOCK, MTX_DEF); 544 545 ha->flags.lock_init = 1; 546 547 pci_enable_busmaster(dev); 548 549 /* 550 * map the PCI BARs 551 */ 552 553 ha->reg_rid = PCIR_BAR(0); 554 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 555 RF_ACTIVE); 556 557 if (ha->pci_reg == NULL) { 558 device_printf(dev, "unable to map BAR0\n"); 559 goto qlnx_pci_attach_err; 560 } 561 562 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 563 ha->reg_rid); 564 565 ha->dbells_rid = PCIR_BAR(2); 566 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 567 &ha->dbells_rid, RF_ACTIVE); 568 569 if (ha->pci_dbells == NULL) { 570 device_printf(dev, "unable to map BAR1\n"); 571 goto qlnx_pci_attach_err; 572 } 573 574 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 575 ha->dbells_rid); 576 577 ha->dbells_phys_addr = (uint64_t) 578 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);; 579 ha->dbells_size = rsrc_len_dbells; 580 581 ha->msix_rid = PCIR_BAR(4); 582 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 583 &ha->msix_rid, RF_ACTIVE); 584 585 if (ha->msix_bar == NULL) { 586 device_printf(dev, "unable to map BAR2\n"); 587 goto qlnx_pci_attach_err; 588 } 589 590 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 591 ha->msix_rid); 592 /* 593 * allocate dma tags 594 */ 595 596 if (qlnx_alloc_parent_dma_tag(ha)) 597 goto qlnx_pci_attach_err; 598 599 if (qlnx_alloc_tx_dma_tag(ha)) 600 goto qlnx_pci_attach_err; 601 602 if (qlnx_alloc_rx_dma_tag(ha)) 603 goto qlnx_pci_attach_err; 604 605 606 if (qlnx_init_hw(ha) != 0) 607 goto qlnx_pci_attach_err; 608 609 /* 610 * Allocate MSI-x vectors 611 */ 612 ha->num_rss = QLNX_MAX_RSS; 613 ha->num_tc = QLNX_MAX_TC; 614 615 ha->msix_count = pci_msix_count(dev); 616 617 if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns)) 618 ha->msix_count = mp_ncpus + ha->cdev.num_hwfns; 619 620 if (!ha->msix_count || 621 (ha->msix_count < (ha->cdev.num_hwfns + 1 ))) { 622 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 623 ha->msix_count); 624 goto qlnx_pci_attach_err; 625 } 626 627 if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns )) 628 ha->msix_count = ha->num_rss + ha->cdev.num_hwfns; 629 else 630 ha->num_rss = ha->msix_count - ha->cdev.num_hwfns; 631 632 QL_DPRINT1(ha, (dev, "%s:\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]" 633 "\n\t\t\tdbells [%p, 0x%08x 0x%08x]" 634 "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]" 635 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 636 __func__, ha->pci_reg, rsrc_len_reg, 
637 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 638 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 639 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc)); 640 641 if (pci_alloc_msix(dev, &ha->msix_count)) { 642 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 643 ha->msix_count); 644 ha->msix_count = 0; 645 goto qlnx_pci_attach_err; 646 } 647 648 /* 649 * Initialize slow path interrupt and task queue 650 */ 651 if (qlnx_create_sp_taskqueues(ha) != 0) 652 goto qlnx_pci_attach_err; 653 654 for (i = 0; i < ha->cdev.num_hwfns; i++) { 655 656 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 657 658 ha->sp_irq_rid[i] = i + 1; 659 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 660 &ha->sp_irq_rid[i], 661 (RF_ACTIVE | RF_SHAREABLE)); 662 if (ha->sp_irq[i] == NULL) { 663 device_printf(dev, 664 "could not allocate mbx interrupt\n"); 665 goto qlnx_pci_attach_err; 666 } 667 668 if (bus_setup_intr(dev, ha->sp_irq[i], 669 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 670 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 671 device_printf(dev, 672 "could not setup slow path interrupt\n"); 673 goto qlnx_pci_attach_err; 674 } 675 676 QL_DPRINT1(ha, (dev, "%s: p_hwfn [%p] sp_irq_rid %d" 677 " sp_irq %p sp_handle %p\n", __func__, p_hwfn, 678 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i])); 679 680 } 681 682 /* 683 * initialize fast path interrupt 684 */ 685 if (qlnx_create_fp_taskqueues(ha) != 0) 686 goto qlnx_pci_attach_err; 687 688 for (i = 0; i < ha->num_rss; i++) { 689 ha->irq_vec[i].rss_idx = i; 690 ha->irq_vec[i].ha = ha; 691 ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i; 692 693 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 694 &ha->irq_vec[i].irq_rid, 695 (RF_ACTIVE | RF_SHAREABLE)); 696 697 if (ha->irq_vec[i].irq == NULL) { 698 device_printf(dev, 699 "could not allocate interrupt[%d]\n", i); 700 goto qlnx_pci_attach_err; 701 } 702 703 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 704 device_printf(dev, "could not allocate tx_br[%d]\n", i); 705 goto qlnx_pci_attach_err; 706 707 } 708 } 709 710 callout_init(&ha->qlnx_callout, 1); 711 ha->flags.callout_init = 1; 712 713 for (i = 0; i < ha->cdev.num_hwfns; i++) { 714 715 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 716 goto qlnx_pci_attach_err; 717 if (ha->grcdump_size[i] == 0) 718 goto qlnx_pci_attach_err; 719 720 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 721 QL_DPRINT1(ha, (dev, "grcdump_size[%d] = 0x%08x\n", 722 i, ha->grcdump_size[i])); 723 724 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 725 if (ha->grcdump[i] == NULL) { 726 device_printf(dev, "grcdump alloc[%d] failed\n", i); 727 goto qlnx_pci_attach_err; 728 } 729 730 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 731 goto qlnx_pci_attach_err; 732 if (ha->idle_chk_size[i] == 0) 733 goto qlnx_pci_attach_err; 734 735 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 736 QL_DPRINT1(ha, (dev, "idle_chk_size[%d] = 0x%08x\n", 737 i, ha->idle_chk_size[i])); 738 739 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 740 741 if (ha->idle_chk[i] == NULL) { 742 device_printf(dev, "idle_chk alloc failed\n"); 743 goto qlnx_pci_attach_err; 744 } 745 } 746 747 if (qlnx_slowpath_start(ha) != 0) { 748 749 qlnx_mdelay(__func__, 1000); 750 qlnx_trigger_dump(ha); 751 752 goto qlnx_pci_attach_err0; 753 } else 754 ha->flags.slowpath_start = 1; 755 756 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 757 qlnx_mdelay(__func__, 1000); 758 qlnx_trigger_dump(ha); 759 760 goto qlnx_pci_attach_err0; 761 } 762 
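/*
 * The management firmware (MFW) version is returned as a single 32-bit
 * word and is decoded below one byte at a time, most significant byte
 * first (major.minor.revision.engineering). For example, a raw value of
 * 0x08210000 (illustrative only) would be reported as "8.33.0.0".
 */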
763 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 764 qlnx_mdelay(__func__, 1000); 765 qlnx_trigger_dump(ha); 766 767 goto qlnx_pci_attach_err0; 768 } 769 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 770 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 771 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 772 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 773 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 774 FW_ENGINEERING_VERSION); 775 776 QL_DPRINT1(ha, (dev, "%s: STORM_FW version %s MFW version %s\n", 777 __func__, ha->stormfw_ver, ha->mfw_ver)); 778 779 qlnx_init_ifnet(dev, ha); 780 781 /* 782 * add sysctls 783 */ 784 qlnx_add_sysctls(ha); 785 786 qlnx_pci_attach_err0: 787 /* 788 * create ioctl device interface 789 */ 790 if (qlnx_make_cdev(ha)) { 791 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 792 goto qlnx_pci_attach_err; 793 } 794 795 QL_DPRINT2(ha, (dev, "%s: success\n", __func__)); 796 797 return (0); 798 799 qlnx_pci_attach_err: 800 801 qlnx_release(ha); 802 803 return (ENXIO); 804 } 805 806 /* 807 * Name: qlnx_pci_detach 808 * Function: Unhooks the device from the operating system 809 */ 810 static int 811 qlnx_pci_detach(device_t dev) 812 { 813 qlnx_host_t *ha = NULL; 814 815 if ((ha = device_get_softc(dev)) == NULL) { 816 device_printf(dev, "cannot get softc\n"); 817 return (ENOMEM); 818 } 819 820 QLNX_LOCK(ha); 821 qlnx_stop(ha); 822 QLNX_UNLOCK(ha); 823 824 qlnx_release(ha); 825 826 return (0); 827 } 828 829 static int 830 qlnx_init_hw(qlnx_host_t *ha) 831 { 832 int rval = 0; 833 struct ecore_hw_prepare_params params; 834 835 ecore_init_struct(&ha->cdev); 836 837 /* ha->dp_module = ECORE_MSG_PROBE | 838 ECORE_MSG_INTR | 839 ECORE_MSG_SP | 840 ECORE_MSG_LINK | 841 ECORE_MSG_SPQ | 842 ECORE_MSG_RDMA; 843 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 844 ha->dp_level = ECORE_LEVEL_NOTICE; 845 846 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 847 848 ha->cdev.regview = ha->pci_reg; 849 ha->cdev.doorbells = ha->pci_dbells; 850 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 851 ha->cdev.db_size = ha->dbells_size; 852 853 bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); 854 855 ha->personality = ECORE_PCI_DEFAULT; 856 857 params.personality = ha->personality; 858 859 params.drv_resc_alloc = false; 860 params.chk_reg_fifo = false; 861 params.initiate_pf_flr = true; 862 params.epoch = 0; 863 864 ecore_hw_prepare(&ha->cdev, ¶ms); 865 866 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 867 868 return (rval); 869 } 870 871 static void 872 qlnx_release(qlnx_host_t *ha) 873 { 874 device_t dev; 875 int i; 876 877 dev = ha->pci_dev; 878 879 QL_DPRINT2(ha, (dev, "%s: enter\n", __func__)); 880 881 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 882 if (ha->idle_chk[i] != NULL) { 883 free(ha->idle_chk[i], M_QLNXBUF); 884 ha->idle_chk[i] = NULL; 885 } 886 887 if (ha->grcdump[i] != NULL) { 888 free(ha->grcdump[i], M_QLNXBUF); 889 ha->grcdump[i] = NULL; 890 } 891 } 892 893 if (ha->flags.callout_init) 894 callout_drain(&ha->qlnx_callout); 895 896 if (ha->flags.slowpath_start) { 897 qlnx_slowpath_stop(ha); 898 } 899 900 ecore_hw_remove(&ha->cdev); 901 902 qlnx_del_cdev(ha); 903 904 if (ha->ifp != NULL) 905 ether_ifdetach(ha->ifp); 906 907 qlnx_free_tx_dma_tag(ha); 908 909 qlnx_free_rx_dma_tag(ha); 910 911 qlnx_free_parent_dma_tag(ha); 912 913 for (i = 0; i < ha->num_rss; i++) { 914 struct qlnx_fastpath *fp = &ha->fp_array[i]; 915 916 if (ha->irq_vec[i].handle) { 917 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 918 
ha->irq_vec[i].handle); 919 } 920 921 if (ha->irq_vec[i].irq) { 922 (void)bus_release_resource(dev, SYS_RES_IRQ, 923 ha->irq_vec[i].irq_rid, 924 ha->irq_vec[i].irq); 925 } 926 927 qlnx_free_tx_br(ha, fp); 928 } 929 qlnx_destroy_fp_taskqueues(ha); 930 931 for (i = 0; i < ha->cdev.num_hwfns; i++) { 932 if (ha->sp_handle[i]) 933 (void)bus_teardown_intr(dev, ha->sp_irq[i], 934 ha->sp_handle[i]); 935 936 if (ha->sp_irq[i]) 937 (void) bus_release_resource(dev, SYS_RES_IRQ, 938 ha->sp_irq_rid[i], ha->sp_irq[i]); 939 } 940 941 qlnx_destroy_sp_taskqueues(ha); 942 943 if (ha->msix_count) 944 pci_release_msi(dev); 945 946 if (ha->flags.lock_init) { 947 mtx_destroy(&ha->tx_lock); 948 mtx_destroy(&ha->hw_lock); 949 } 950 951 if (ha->pci_reg) 952 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 953 ha->pci_reg); 954 955 if (ha->pci_dbells) 956 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 957 ha->pci_dbells); 958 959 if (ha->msix_bar) 960 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 961 ha->msix_bar); 962 963 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 964 return; 965 } 966 967 static void 968 qlnx_trigger_dump(qlnx_host_t *ha) 969 { 970 int i; 971 972 if (ha->ifp != NULL) 973 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 974 975 QL_DPRINT2(ha, (ha->pci_dev, "%s: start\n", __func__)); 976 977 for (i = 0; i < ha->cdev.num_hwfns; i++) { 978 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 979 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 980 } 981 982 QL_DPRINT2(ha, (ha->pci_dev, "%s: end\n", __func__)); 983 984 return; 985 } 986 987 static int 988 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 989 { 990 int err, ret = 0; 991 qlnx_host_t *ha; 992 993 err = sysctl_handle_int(oidp, &ret, 0, req); 994 995 if (err || !req->newptr) 996 return (err); 997 998 if (ret == 1) { 999 ha = (qlnx_host_t *)arg1; 1000 qlnx_trigger_dump(ha); 1001 } 1002 return (err); 1003 } 1004 1005 static int 1006 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1007 { 1008 int err, i, ret = 0, usecs = 0; 1009 qlnx_host_t *ha; 1010 struct ecore_hwfn *p_hwfn; 1011 struct qlnx_fastpath *fp; 1012 1013 err = sysctl_handle_int(oidp, &usecs, 0, req); 1014 1015 if (err || !req->newptr || !usecs || (usecs > 255)) 1016 return (err); 1017 1018 ha = (qlnx_host_t *)arg1; 1019 1020 for (i = 0; i < ha->num_rss; i++) { 1021 1022 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1023 1024 fp = &ha->fp_array[i]; 1025 1026 if (fp->txq[0]->handle != NULL) { 1027 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1028 (uint16_t)usecs, fp->txq[0]->handle); 1029 } 1030 } 1031 1032 if (!ret) 1033 ha->tx_coalesce_usecs = (uint8_t)usecs; 1034 1035 return (err); 1036 } 1037 1038 static int 1039 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1040 { 1041 int err, i, ret = 0, usecs = 0; 1042 qlnx_host_t *ha; 1043 struct ecore_hwfn *p_hwfn; 1044 struct qlnx_fastpath *fp; 1045 1046 err = sysctl_handle_int(oidp, &usecs, 0, req); 1047 1048 if (err || !req->newptr || !usecs || (usecs > 255)) 1049 return (err); 1050 1051 ha = (qlnx_host_t *)arg1; 1052 1053 for (i = 0; i < ha->num_rss; i++) { 1054 1055 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1056 1057 fp = &ha->fp_array[i]; 1058 1059 if (fp->rxq->handle != NULL) { 1060 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1061 0, fp->rxq->handle); 1062 } 1063 } 1064 1065 if (!ret) 1066 ha->rx_coalesce_usecs = (uint8_t)usecs; 1067 1068 return (err); 1069 } 1070 1071 static void 1072 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1073 { 1074 struct sysctl_ctx_list 
*ctx; 1075 struct sysctl_oid_list *children; 1076 struct sysctl_oid *ctx_oid; 1077 1078 ctx = device_get_sysctl_ctx(ha->pci_dev); 1079 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1080 1081 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1082 CTLFLAG_RD, NULL, "spstat"); 1083 children = SYSCTL_CHILDREN(ctx_oid); 1084 1085 SYSCTL_ADD_QUAD(ctx, children, 1086 OID_AUTO, "sp_interrupts", 1087 CTLFLAG_RD, &ha->sp_interrupts, 1088 "No. of slowpath interrupts"); 1089 1090 return; 1091 } 1092 1093 static void 1094 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1095 { 1096 struct sysctl_ctx_list *ctx; 1097 struct sysctl_oid_list *children; 1098 struct sysctl_oid_list *node_children; 1099 struct sysctl_oid *ctx_oid; 1100 int i, j; 1101 uint8_t name_str[16]; 1102 1103 ctx = device_get_sysctl_ctx(ha->pci_dev); 1104 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1105 1106 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1107 CTLFLAG_RD, NULL, "fpstat"); 1108 children = SYSCTL_CHILDREN(ctx_oid); 1109 1110 for (i = 0; i < ha->num_rss; i++) { 1111 1112 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1113 snprintf(name_str, sizeof(name_str), "%d", i); 1114 1115 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1116 CTLFLAG_RD, NULL, name_str); 1117 node_children = SYSCTL_CHILDREN(ctx_oid); 1118 1119 /* Tx Related */ 1120 1121 SYSCTL_ADD_QUAD(ctx, node_children, 1122 OID_AUTO, "tx_pkts_processed", 1123 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1124 "No. of packets processed for transmission"); 1125 1126 SYSCTL_ADD_QUAD(ctx, node_children, 1127 OID_AUTO, "tx_pkts_freed", 1128 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1129 "No. of freed packets"); 1130 1131 SYSCTL_ADD_QUAD(ctx, node_children, 1132 OID_AUTO, "tx_pkts_transmitted", 1133 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1134 "No. of transmitted packets"); 1135 1136 SYSCTL_ADD_QUAD(ctx, node_children, 1137 OID_AUTO, "tx_pkts_completed", 1138 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1139 "No. 
of transmit completions"); 1140 1141 SYSCTL_ADD_QUAD(ctx, node_children, 1142 OID_AUTO, "tx_lso_wnd_min_len", 1143 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1144 "tx_lso_wnd_min_len"); 1145 1146 SYSCTL_ADD_QUAD(ctx, node_children, 1147 OID_AUTO, "tx_defrag", 1148 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1149 "tx_defrag"); 1150 1151 SYSCTL_ADD_QUAD(ctx, node_children, 1152 OID_AUTO, "tx_nsegs_gt_elem_left", 1153 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1154 "tx_nsegs_gt_elem_left"); 1155 1156 SYSCTL_ADD_UINT(ctx, node_children, 1157 OID_AUTO, "tx_tso_max_nsegs", 1158 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1159 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1160 1161 SYSCTL_ADD_UINT(ctx, node_children, 1162 OID_AUTO, "tx_tso_min_nsegs", 1163 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1164 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1165 1166 SYSCTL_ADD_UINT(ctx, node_children, 1167 OID_AUTO, "tx_tso_max_pkt_len", 1168 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1169 ha->fp_array[i].tx_tso_max_pkt_len, 1170 "tx_tso_max_pkt_len"); 1171 1172 SYSCTL_ADD_UINT(ctx, node_children, 1173 OID_AUTO, "tx_tso_min_pkt_len", 1174 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1175 ha->fp_array[i].tx_tso_min_pkt_len, 1176 "tx_tso_min_pkt_len"); 1177 1178 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1179 1180 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1181 snprintf(name_str, sizeof(name_str), 1182 "tx_pkts_nseg_%02d", (j+1)); 1183 1184 SYSCTL_ADD_QUAD(ctx, node_children, 1185 OID_AUTO, name_str, CTLFLAG_RD, 1186 &ha->fp_array[i].tx_pkts[j], name_str); 1187 } 1188 1189 SYSCTL_ADD_QUAD(ctx, node_children, 1190 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1191 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1192 "err_tx_nsegs_gt_elem_left"); 1193 1194 SYSCTL_ADD_QUAD(ctx, node_children, 1195 OID_AUTO, "err_tx_dmamap_create", 1196 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1197 "err_tx_dmamap_create"); 1198 1199 SYSCTL_ADD_QUAD(ctx, node_children, 1200 OID_AUTO, "err_tx_defrag_dmamap_load", 1201 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1202 "err_tx_defrag_dmamap_load"); 1203 1204 SYSCTL_ADD_QUAD(ctx, node_children, 1205 OID_AUTO, "err_tx_non_tso_max_seg", 1206 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1207 "err_tx_non_tso_max_seg"); 1208 1209 SYSCTL_ADD_QUAD(ctx, node_children, 1210 OID_AUTO, "err_tx_dmamap_load", 1211 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1212 "err_tx_dmamap_load"); 1213 1214 SYSCTL_ADD_QUAD(ctx, node_children, 1215 OID_AUTO, "err_tx_defrag", 1216 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1217 "err_tx_defrag"); 1218 1219 SYSCTL_ADD_QUAD(ctx, node_children, 1220 OID_AUTO, "err_tx_free_pkt_null", 1221 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1222 "err_tx_free_pkt_null"); 1223 1224 SYSCTL_ADD_QUAD(ctx, node_children, 1225 OID_AUTO, "err_tx_cons_idx_conflict", 1226 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1227 "err_tx_cons_idx_conflict"); 1228 1229 #ifdef QLNX_TRACE_LRO_CNT 1230 SYSCTL_ADD_QUAD(ctx, node_children, 1231 OID_AUTO, "lro_cnt_64", 1232 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1233 "lro_cnt_64"); 1234 1235 SYSCTL_ADD_QUAD(ctx, node_children, 1236 OID_AUTO, "lro_cnt_128", 1237 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1238 "lro_cnt_128"); 1239 1240 SYSCTL_ADD_QUAD(ctx, node_children, 1241 OID_AUTO, "lro_cnt_256", 1242 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1243 "lro_cnt_256"); 1244 1245 SYSCTL_ADD_QUAD(ctx, node_children, 1246 OID_AUTO, "lro_cnt_512", 
1247 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1248 "lro_cnt_512"); 1249 1250 SYSCTL_ADD_QUAD(ctx, node_children, 1251 OID_AUTO, "lro_cnt_1024", 1252 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1253 "lro_cnt_1024"); 1254 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 1255 1256 /* Rx Related */ 1257 1258 SYSCTL_ADD_QUAD(ctx, node_children, 1259 OID_AUTO, "rx_pkts", 1260 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1261 "No. of received packets"); 1262 1263 SYSCTL_ADD_QUAD(ctx, node_children, 1264 OID_AUTO, "tpa_start", 1265 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1266 "No. of tpa_start packets"); 1267 1268 SYSCTL_ADD_QUAD(ctx, node_children, 1269 OID_AUTO, "tpa_cont", 1270 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1271 "No. of tpa_cont packets"); 1272 1273 SYSCTL_ADD_QUAD(ctx, node_children, 1274 OID_AUTO, "tpa_end", 1275 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1276 "No. of tpa_end packets"); 1277 1278 SYSCTL_ADD_QUAD(ctx, node_children, 1279 OID_AUTO, "err_m_getcl", 1280 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1281 "err_m_getcl"); 1282 1283 SYSCTL_ADD_QUAD(ctx, node_children, 1284 OID_AUTO, "err_m_getjcl", 1285 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1286 "err_m_getjcl"); 1287 1288 SYSCTL_ADD_QUAD(ctx, node_children, 1289 OID_AUTO, "err_rx_hw_errors", 1290 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1291 "err_rx_hw_errors"); 1292 1293 SYSCTL_ADD_QUAD(ctx, node_children, 1294 OID_AUTO, "err_rx_alloc_errors", 1295 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1296 "err_rx_alloc_errors"); 1297 } 1298 1299 return; 1300 } 1301 1302 static void 1303 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1304 { 1305 struct sysctl_ctx_list *ctx; 1306 struct sysctl_oid_list *children; 1307 struct sysctl_oid *ctx_oid; 1308 1309 ctx = device_get_sysctl_ctx(ha->pci_dev); 1310 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1311 1312 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1313 CTLFLAG_RD, NULL, "hwstat"); 1314 children = SYSCTL_CHILDREN(ctx_oid); 1315 1316 SYSCTL_ADD_QUAD(ctx, children, 1317 OID_AUTO, "no_buff_discards", 1318 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1319 "No. of packets discarded due to lack of buffer"); 1320 1321 SYSCTL_ADD_QUAD(ctx, children, 1322 OID_AUTO, "packet_too_big_discard", 1323 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1324 "No. 
of packets discarded because packet was too big"); 1325 1326 SYSCTL_ADD_QUAD(ctx, children, 1327 OID_AUTO, "ttl0_discard", 1328 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1329 "ttl0_discard"); 1330 1331 SYSCTL_ADD_QUAD(ctx, children, 1332 OID_AUTO, "rx_ucast_bytes", 1333 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1334 "rx_ucast_bytes"); 1335 1336 SYSCTL_ADD_QUAD(ctx, children, 1337 OID_AUTO, "rx_mcast_bytes", 1338 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1339 "rx_mcast_bytes"); 1340 1341 SYSCTL_ADD_QUAD(ctx, children, 1342 OID_AUTO, "rx_bcast_bytes", 1343 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1344 "rx_bcast_bytes"); 1345 1346 SYSCTL_ADD_QUAD(ctx, children, 1347 OID_AUTO, "rx_ucast_pkts", 1348 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1349 "rx_ucast_pkts"); 1350 1351 SYSCTL_ADD_QUAD(ctx, children, 1352 OID_AUTO, "rx_mcast_pkts", 1353 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1354 "rx_mcast_pkts"); 1355 1356 SYSCTL_ADD_QUAD(ctx, children, 1357 OID_AUTO, "rx_bcast_pkts", 1358 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1359 "rx_bcast_pkts"); 1360 1361 SYSCTL_ADD_QUAD(ctx, children, 1362 OID_AUTO, "mftag_filter_discards", 1363 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1364 "mftag_filter_discards"); 1365 1366 SYSCTL_ADD_QUAD(ctx, children, 1367 OID_AUTO, "mac_filter_discards", 1368 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1369 "mac_filter_discards"); 1370 1371 SYSCTL_ADD_QUAD(ctx, children, 1372 OID_AUTO, "tx_ucast_bytes", 1373 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1374 "tx_ucast_bytes"); 1375 1376 SYSCTL_ADD_QUAD(ctx, children, 1377 OID_AUTO, "tx_mcast_bytes", 1378 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1379 "tx_mcast_bytes"); 1380 1381 SYSCTL_ADD_QUAD(ctx, children, 1382 OID_AUTO, "tx_bcast_bytes", 1383 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1384 "tx_bcast_bytes"); 1385 1386 SYSCTL_ADD_QUAD(ctx, children, 1387 OID_AUTO, "tx_ucast_pkts", 1388 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1389 "tx_ucast_pkts"); 1390 1391 SYSCTL_ADD_QUAD(ctx, children, 1392 OID_AUTO, "tx_mcast_pkts", 1393 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1394 "tx_mcast_pkts"); 1395 1396 SYSCTL_ADD_QUAD(ctx, children, 1397 OID_AUTO, "tx_bcast_pkts", 1398 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1399 "tx_bcast_pkts"); 1400 1401 SYSCTL_ADD_QUAD(ctx, children, 1402 OID_AUTO, "tx_err_drop_pkts", 1403 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1404 "tx_err_drop_pkts"); 1405 1406 SYSCTL_ADD_QUAD(ctx, children, 1407 OID_AUTO, "tpa_coalesced_pkts", 1408 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1409 "tpa_coalesced_pkts"); 1410 1411 SYSCTL_ADD_QUAD(ctx, children, 1412 OID_AUTO, "tpa_coalesced_events", 1413 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1414 "tpa_coalesced_events"); 1415 1416 SYSCTL_ADD_QUAD(ctx, children, 1417 OID_AUTO, "tpa_aborts_num", 1418 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1419 "tpa_aborts_num"); 1420 1421 SYSCTL_ADD_QUAD(ctx, children, 1422 OID_AUTO, "tpa_not_coalesced_pkts", 1423 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1424 "tpa_not_coalesced_pkts"); 1425 1426 SYSCTL_ADD_QUAD(ctx, children, 1427 OID_AUTO, "tpa_coalesced_bytes", 1428 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1429 "tpa_coalesced_bytes"); 1430 1431 SYSCTL_ADD_QUAD(ctx, children, 1432 OID_AUTO, "rx_64_byte_packets", 1433 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1434 "rx_64_byte_packets"); 1435 1436 SYSCTL_ADD_QUAD(ctx, children, 1437 OID_AUTO, 
"rx_65_to_127_byte_packets", 1438 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1439 "rx_65_to_127_byte_packets"); 1440 1441 SYSCTL_ADD_QUAD(ctx, children, 1442 OID_AUTO, "rx_128_to_255_byte_packets", 1443 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1444 "rx_128_to_255_byte_packets"); 1445 1446 SYSCTL_ADD_QUAD(ctx, children, 1447 OID_AUTO, "rx_256_to_511_byte_packets", 1448 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1449 "rx_256_to_511_byte_packets"); 1450 1451 SYSCTL_ADD_QUAD(ctx, children, 1452 OID_AUTO, "rx_512_to_1023_byte_packets", 1453 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1454 "rx_512_to_1023_byte_packets"); 1455 1456 SYSCTL_ADD_QUAD(ctx, children, 1457 OID_AUTO, "rx_1024_to_1518_byte_packets", 1458 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1459 "rx_1024_to_1518_byte_packets"); 1460 1461 SYSCTL_ADD_QUAD(ctx, children, 1462 OID_AUTO, "rx_1519_to_1522_byte_packets", 1463 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1464 "rx_1519_to_1522_byte_packets"); 1465 1466 SYSCTL_ADD_QUAD(ctx, children, 1467 OID_AUTO, "rx_1523_to_2047_byte_packets", 1468 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1469 "rx_1523_to_2047_byte_packets"); 1470 1471 SYSCTL_ADD_QUAD(ctx, children, 1472 OID_AUTO, "rx_2048_to_4095_byte_packets", 1473 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1474 "rx_2048_to_4095_byte_packets"); 1475 1476 SYSCTL_ADD_QUAD(ctx, children, 1477 OID_AUTO, "rx_4096_to_9216_byte_packets", 1478 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1479 "rx_4096_to_9216_byte_packets"); 1480 1481 SYSCTL_ADD_QUAD(ctx, children, 1482 OID_AUTO, "rx_9217_to_16383_byte_packets", 1483 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1484 "rx_9217_to_16383_byte_packets"); 1485 1486 SYSCTL_ADD_QUAD(ctx, children, 1487 OID_AUTO, "rx_crc_errors", 1488 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1489 "rx_crc_errors"); 1490 1491 SYSCTL_ADD_QUAD(ctx, children, 1492 OID_AUTO, "rx_mac_crtl_frames", 1493 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1494 "rx_mac_crtl_frames"); 1495 1496 SYSCTL_ADD_QUAD(ctx, children, 1497 OID_AUTO, "rx_pause_frames", 1498 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1499 "rx_pause_frames"); 1500 1501 SYSCTL_ADD_QUAD(ctx, children, 1502 OID_AUTO, "rx_pfc_frames", 1503 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1504 "rx_pfc_frames"); 1505 1506 SYSCTL_ADD_QUAD(ctx, children, 1507 OID_AUTO, "rx_align_errors", 1508 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1509 "rx_align_errors"); 1510 1511 SYSCTL_ADD_QUAD(ctx, children, 1512 OID_AUTO, "rx_carrier_errors", 1513 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1514 "rx_carrier_errors"); 1515 1516 SYSCTL_ADD_QUAD(ctx, children, 1517 OID_AUTO, "rx_oversize_packets", 1518 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 1519 "rx_oversize_packets"); 1520 1521 SYSCTL_ADD_QUAD(ctx, children, 1522 OID_AUTO, "rx_jabbers", 1523 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 1524 "rx_jabbers"); 1525 1526 SYSCTL_ADD_QUAD(ctx, children, 1527 OID_AUTO, "rx_undersize_packets", 1528 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 1529 "rx_undersize_packets"); 1530 1531 SYSCTL_ADD_QUAD(ctx, children, 1532 OID_AUTO, "rx_fragments", 1533 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 1534 "rx_fragments"); 1535 1536 SYSCTL_ADD_QUAD(ctx, children, 1537 OID_AUTO, "tx_64_byte_packets", 1538 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 1539 
"tx_64_byte_packets"); 1540 1541 SYSCTL_ADD_QUAD(ctx, children, 1542 OID_AUTO, "tx_65_to_127_byte_packets", 1543 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 1544 "tx_65_to_127_byte_packets"); 1545 1546 SYSCTL_ADD_QUAD(ctx, children, 1547 OID_AUTO, "tx_128_to_255_byte_packets", 1548 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 1549 "tx_128_to_255_byte_packets"); 1550 1551 SYSCTL_ADD_QUAD(ctx, children, 1552 OID_AUTO, "tx_256_to_511_byte_packets", 1553 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 1554 "tx_256_to_511_byte_packets"); 1555 1556 SYSCTL_ADD_QUAD(ctx, children, 1557 OID_AUTO, "tx_512_to_1023_byte_packets", 1558 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 1559 "tx_512_to_1023_byte_packets"); 1560 1561 SYSCTL_ADD_QUAD(ctx, children, 1562 OID_AUTO, "tx_1024_to_1518_byte_packets", 1563 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 1564 "tx_1024_to_1518_byte_packets"); 1565 1566 SYSCTL_ADD_QUAD(ctx, children, 1567 OID_AUTO, "tx_1519_to_2047_byte_packets", 1568 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 1569 "tx_1519_to_2047_byte_packets"); 1570 1571 SYSCTL_ADD_QUAD(ctx, children, 1572 OID_AUTO, "tx_2048_to_4095_byte_packets", 1573 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 1574 "tx_2048_to_4095_byte_packets"); 1575 1576 SYSCTL_ADD_QUAD(ctx, children, 1577 OID_AUTO, "tx_4096_to_9216_byte_packets", 1578 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 1579 "tx_4096_to_9216_byte_packets"); 1580 1581 SYSCTL_ADD_QUAD(ctx, children, 1582 OID_AUTO, "tx_9217_to_16383_byte_packets", 1583 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 1584 "tx_9217_to_16383_byte_packets"); 1585 1586 SYSCTL_ADD_QUAD(ctx, children, 1587 OID_AUTO, "tx_pause_frames", 1588 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 1589 "tx_pause_frames"); 1590 1591 SYSCTL_ADD_QUAD(ctx, children, 1592 OID_AUTO, "tx_pfc_frames", 1593 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 1594 "tx_pfc_frames"); 1595 1596 SYSCTL_ADD_QUAD(ctx, children, 1597 OID_AUTO, "tx_lpi_entry_count", 1598 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 1599 "tx_lpi_entry_count"); 1600 1601 SYSCTL_ADD_QUAD(ctx, children, 1602 OID_AUTO, "tx_total_collisions", 1603 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 1604 "tx_total_collisions"); 1605 1606 SYSCTL_ADD_QUAD(ctx, children, 1607 OID_AUTO, "brb_truncates", 1608 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 1609 "brb_truncates"); 1610 1611 SYSCTL_ADD_QUAD(ctx, children, 1612 OID_AUTO, "brb_discards", 1613 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 1614 "brb_discards"); 1615 1616 SYSCTL_ADD_QUAD(ctx, children, 1617 OID_AUTO, "rx_mac_bytes", 1618 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 1619 "rx_mac_bytes"); 1620 1621 SYSCTL_ADD_QUAD(ctx, children, 1622 OID_AUTO, "rx_mac_uc_packets", 1623 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 1624 "rx_mac_uc_packets"); 1625 1626 SYSCTL_ADD_QUAD(ctx, children, 1627 OID_AUTO, "rx_mac_mc_packets", 1628 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 1629 "rx_mac_mc_packets"); 1630 1631 SYSCTL_ADD_QUAD(ctx, children, 1632 OID_AUTO, "rx_mac_bc_packets", 1633 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 1634 "rx_mac_bc_packets"); 1635 1636 SYSCTL_ADD_QUAD(ctx, children, 1637 OID_AUTO, "rx_mac_frames_ok", 1638 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 1639 "rx_mac_frames_ok"); 1640 1641 SYSCTL_ADD_QUAD(ctx, children, 1642 OID_AUTO, "tx_mac_bytes", 1643 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 1644 "tx_mac_bytes"); 1645 1646 SYSCTL_ADD_QUAD(ctx, children, 1647 OID_AUTO, "tx_mac_uc_packets", 1648 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 1649 "tx_mac_uc_packets"); 1650 1651 SYSCTL_ADD_QUAD(ctx, children, 1652 OID_AUTO, "tx_mac_mc_packets", 1653 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 1654 "tx_mac_mc_packets"); 1655 1656 SYSCTL_ADD_QUAD(ctx, children, 1657 OID_AUTO, "tx_mac_bc_packets", 1658 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 1659 "tx_mac_bc_packets"); 1660 1661 SYSCTL_ADD_QUAD(ctx, children, 1662 OID_AUTO, "tx_mac_ctrl_frames", 1663 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 1664 "tx_mac_ctrl_frames"); 1665 return; 1666 } 1667 1668 static void 1669 qlnx_add_sysctls(qlnx_host_t *ha) 1670 { 1671 device_t dev = ha->pci_dev; 1672 struct sysctl_ctx_list *ctx; 1673 struct sysctl_oid_list *children; 1674 1675 ctx = device_get_sysctl_ctx(dev); 1676 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 1677 1678 qlnx_add_fp_stats_sysctls(ha); 1679 qlnx_add_sp_stats_sysctls(ha); 1680 qlnx_add_hw_stats_sysctls(ha); 1681 1682 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 1683 CTLFLAG_RD, qlnx_ver_str, 0, 1684 "Driver Version"); 1685 1686 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 1687 CTLFLAG_RD, ha->stormfw_ver, 0, 1688 "STORM Firmware Version"); 1689 1690 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 1691 CTLFLAG_RD, ha->mfw_ver, 0, 1692 "Management Firmware Version"); 1693 1694 SYSCTL_ADD_UINT(ctx, children, 1695 OID_AUTO, "personality", CTLFLAG_RD, 1696 &ha->personality, ha->personality, 1697 "\tpersonality = 0 => Ethernet Only\n" 1698 "\tpersonality = 3 => Ethernet and RoCE\n" 1699 "\tpersonality = 4 => Ethernet and iWARP\n" 1700 "\tpersonality = 6 => Default in Shared Memory\n"); 1701 1702 ha->dbg_level = 0; 1703 1704 SYSCTL_ADD_UINT(ctx, children, 1705 OID_AUTO, "debug", CTLFLAG_RW, 1706 &ha->dbg_level, ha->dbg_level, "Debug Level"); 1707 1708 ha->dp_level = 0; 1709 SYSCTL_ADD_UINT(ctx, children, 1710 OID_AUTO, "dp_level", CTLFLAG_RW, 1711 &ha->dp_level, ha->dp_level, "DP Level"); 1712 1713 ha->dp_module = 0; 1714 SYSCTL_ADD_UINT(ctx, children, 1715 OID_AUTO, "dp_module", CTLFLAG_RW, 1716 &ha->dp_module, ha->dp_module, "DP Module"); 1717 1718 ha->err_inject = 0; 1719 1720 SYSCTL_ADD_UINT(ctx, children, 1721 OID_AUTO, "err_inject", CTLFLAG_RW, 1722 &ha->err_inject, ha->err_inject, "Error Inject"); 1723 1724 ha->storm_stats_enable = 0; 1725 1726 SYSCTL_ADD_UINT(ctx, children, 1727 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 1728 &ha->storm_stats_enable, ha->storm_stats_enable, 1729 "Enable Storm Statistics Gathering"); 1730 1731 ha->storm_stats_index = 0; 1732 1733 SYSCTL_ADD_UINT(ctx, children, 1734 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 1735 &ha->storm_stats_index, ha->storm_stats_index, 1736 "Enable Storm Statistics Gathering Current Index"); 1737 1738 ha->grcdump_taken = 0; 1739 SYSCTL_ADD_UINT(ctx, children, 1740 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 1741 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken"); 1742 1743 ha->idle_chk_taken = 0; 1744 SYSCTL_ADD_UINT(ctx, children, 1745 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 1746 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken"); 1747 1748 SYSCTL_ADD_UINT(ctx, children, 1749 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 1750 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 1751 "rx_coalesce_usecs"); 1752 1753 SYSCTL_ADD_UINT(ctx, children, 1754 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 1755 
&ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 1756 "tx_coalesce_usecs"); 1757 1758 ha->rx_pkt_threshold = 32; 1759 SYSCTL_ADD_UINT(ctx, children, 1760 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 1761 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 1762 "No. of Rx Pkts to process at a time"); 1763 1764 ha->rx_jumbo_buf_eq_mtu = 0; 1765 SYSCTL_ADD_UINT(ctx, children, 1766 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 1767 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 1768 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 1769 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 1770 1771 SYSCTL_ADD_PROC(ctx, children, 1772 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW, 1773 (void *)ha, 0, 1774 qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 1775 1776 SYSCTL_ADD_PROC(ctx, children, 1777 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1778 (void *)ha, 0, 1779 qlnx_set_rx_coalesce, "I", 1780 "rx interrupt coalesce period microseconds"); 1781 1782 SYSCTL_ADD_PROC(ctx, children, 1783 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1784 (void *)ha, 0, 1785 qlnx_set_tx_coalesce, "I", 1786 "tx interrupt coalesce period microseconds"); 1787 1788 SYSCTL_ADD_QUAD(ctx, children, 1789 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 1790 &ha->err_illegal_intr, "err_illegal_intr"); 1791 1792 SYSCTL_ADD_QUAD(ctx, children, 1793 OID_AUTO, "err_fp_null", CTLFLAG_RD, 1794 &ha->err_fp_null, "err_fp_null"); 1795 1796 SYSCTL_ADD_QUAD(ctx, children, 1797 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 1798 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 1799 return; 1800 } 1801 1802 1803 1804 /***************************************************************************** 1805 * Operating System Network Interface Functions 1806 *****************************************************************************/ 1807 1808 static void 1809 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 1810 { 1811 uint16_t device_id; 1812 struct ifnet *ifp; 1813 1814 ifp = ha->ifp = if_alloc(IFT_ETHER); 1815 1816 if (ifp == NULL) 1817 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 1818 1819 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 1820 1821 device_id = pci_get_device(ha->pci_dev); 1822 1823 #if __FreeBSD_version >= 1000000 1824 1825 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 1826 ifp->if_baudrate = IF_Gbps(40); 1827 else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) 1828 ifp->if_baudrate = IF_Gbps(25); 1829 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 1830 ifp->if_baudrate = IF_Gbps(50); 1831 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 1832 ifp->if_baudrate = IF_Gbps(100); 1833 1834 ifp->if_capabilities = IFCAP_LINKSTATE; 1835 #else 1836 ifp->if_mtu = ETHERMTU; 1837 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 1838 1839 #endif /* #if __FreeBSD_version >= 1000000 */ 1840 1841 ifp->if_init = qlnx_init; 1842 ifp->if_softc = ha; 1843 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1844 ifp->if_ioctl = qlnx_ioctl; 1845 ifp->if_transmit = qlnx_transmit; 1846 ifp->if_qflush = qlnx_qflush; 1847 1848 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 1849 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 1850 IFQ_SET_READY(&ifp->if_snd); 1851 1852 #if __FreeBSD_version >= 1100036 1853 if_setgetcounterfn(ifp, qlnx_get_counter); 1854 #endif 1855 1856 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 1857 1858 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 1859 ether_ifattach(ifp, ha->primary_mac); 1860 
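/*
 * The link-level address is read back via IF_LLADDR() right after
 * ether_ifattach() so that ha->primary_mac reflects whatever address the
 * network stack actually installed for this interface (which may differ
 * from the value read from hardware if it was overridden during attach).
 */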
bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 1861 1862 ifp->if_capabilities = IFCAP_HWCSUM; 1863 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 1864 1865 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1866 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1867 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 1868 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 1869 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 1870 ifp->if_capabilities |= IFCAP_TSO4; 1871 ifp->if_capabilities |= IFCAP_TSO6; 1872 ifp->if_capabilities |= IFCAP_LRO; 1873 1874 ifp->if_capenable = ifp->if_capabilities; 1875 1876 ifp->if_hwassist = CSUM_IP; 1877 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 1878 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 1879 ifp->if_hwassist |= CSUM_TSO; 1880 1881 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 1882 1883 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 1884 qlnx_media_status); 1885 1886 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 1887 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 1888 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 1889 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 1890 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) { 1891 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 1892 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 1893 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 1894 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 1895 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 1896 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 1897 ifmedia_add(&ha->media, 1898 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 1899 ifmedia_add(&ha->media, 1900 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 1901 ifmedia_add(&ha->media, 1902 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 1903 } 1904 1905 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 1906 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 1907 1908 1909 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 1910 1911 QL_DPRINT2(ha, (dev, "%s: exit\n", __func__)); 1912 1913 return; 1914 } 1915 1916 static void 1917 qlnx_init_locked(qlnx_host_t *ha) 1918 { 1919 struct ifnet *ifp = ha->ifp; 1920 1921 qlnx_stop(ha); 1922 1923 if (qlnx_load(ha) == 0) { 1924 ifp->if_drv_flags |= IFF_DRV_RUNNING; 1925 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 1926 } 1927 1928 return; 1929 } 1930 1931 static void 1932 qlnx_init(void *arg) 1933 { 1934 qlnx_host_t *ha; 1935 1936 ha = (qlnx_host_t *)arg; 1937 1938 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 1939 1940 QLNX_LOCK(ha); 1941 qlnx_init_locked(ha); 1942 QLNX_UNLOCK(ha); 1943 1944 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 1945 1946 return; 1947 } 1948 1949 static int 1950 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 1951 { 1952 struct ecore_filter_mcast *mcast; 1953 struct ecore_dev *cdev; 1954 int rc; 1955 1956 cdev = &ha->cdev; 1957 1958 mcast = &ha->ecore_mcast; 1959 bzero(mcast, sizeof(struct ecore_filter_mcast)); 1960 1961 if (add_mac) 1962 mcast->opcode = ECORE_FILTER_ADD; 1963 else 1964 mcast->opcode = ECORE_FILTER_REMOVE; 1965 1966 mcast->num_mc_addrs = 1; 1967 memcpy(mcast->mac, mac_addr, ETH_ALEN); 1968 1969 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 1970 1971 return (rc); 1972 } 1973 1974 static int 1975 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 1976 { 1977 int i; 1978 1979 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 1980 1981 if (QL_MAC_CMP(ha->mcast[i].addr, 
mta) == 0) 1982 return 0; /* its been already added */ 1983 } 1984 1985 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 1986 1987 if ((ha->mcast[i].addr[0] == 0) && 1988 (ha->mcast[i].addr[1] == 0) && 1989 (ha->mcast[i].addr[2] == 0) && 1990 (ha->mcast[i].addr[3] == 0) && 1991 (ha->mcast[i].addr[4] == 0) && 1992 (ha->mcast[i].addr[5] == 0)) { 1993 1994 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 1995 return (-1); 1996 1997 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 1998 ha->nmcast++; 1999 2000 return 0; 2001 } 2002 } 2003 return 0; 2004 } 2005 2006 static int 2007 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2008 { 2009 int i; 2010 2011 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2012 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2013 2014 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2015 return (-1); 2016 2017 ha->mcast[i].addr[0] = 0; 2018 ha->mcast[i].addr[1] = 0; 2019 ha->mcast[i].addr[2] = 0; 2020 ha->mcast[i].addr[3] = 0; 2021 ha->mcast[i].addr[4] = 0; 2022 ha->mcast[i].addr[5] = 0; 2023 2024 ha->nmcast--; 2025 2026 return 0; 2027 } 2028 } 2029 return 0; 2030 } 2031 2032 /* 2033 * Name: qls_hw_set_multi 2034 * Function: Sets the Multicast Addresses provided the host O.S into the 2035 * hardware (for the given interface) 2036 */ 2037 static void 2038 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2039 uint32_t add_mac) 2040 { 2041 int i; 2042 2043 for (i = 0; i < mcnt; i++) { 2044 if (add_mac) { 2045 if (qlnx_hw_add_mcast(ha, mta)) 2046 break; 2047 } else { 2048 if (qlnx_hw_del_mcast(ha, mta)) 2049 break; 2050 } 2051 2052 mta += ETHER_HDR_LEN; 2053 } 2054 return; 2055 } 2056 2057 2058 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN) 2059 static int 2060 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2061 { 2062 uint8_t mta[QLNX_MCAST_ADDRS_SIZE]; 2063 struct ifmultiaddr *ifma; 2064 int mcnt = 0; 2065 struct ifnet *ifp = ha->ifp; 2066 int ret = 0; 2067 2068 if_maddr_rlock(ifp); 2069 2070 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2071 2072 if (ifma->ifma_addr->sa_family != AF_LINK) 2073 continue; 2074 2075 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2076 break; 2077 2078 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2079 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2080 2081 mcnt++; 2082 } 2083 2084 if_maddr_runlock(ifp); 2085 2086 QLNX_LOCK(ha); 2087 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2088 QLNX_UNLOCK(ha); 2089 2090 return (ret); 2091 } 2092 2093 static int 2094 qlnx_set_promisc(qlnx_host_t *ha) 2095 { 2096 int rc = 0; 2097 uint8_t filter; 2098 2099 filter = ha->filter; 2100 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2101 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2102 2103 rc = qlnx_set_rx_accept_filter(ha, filter); 2104 return (rc); 2105 } 2106 2107 static int 2108 qlnx_set_allmulti(qlnx_host_t *ha) 2109 { 2110 int rc = 0; 2111 uint8_t filter; 2112 2113 filter = ha->filter; 2114 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2115 rc = qlnx_set_rx_accept_filter(ha, filter); 2116 2117 return (rc); 2118 } 2119 2120 2121 static int 2122 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2123 { 2124 int ret = 0, mask; 2125 struct ifreq *ifr = (struct ifreq *)data; 2126 struct ifaddr *ifa = (struct ifaddr *)data; 2127 qlnx_host_t *ha; 2128 2129 ha = (qlnx_host_t *)ifp->if_softc; 2130 2131 switch (cmd) { 2132 case SIOCSIFADDR: 2133 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFADDR (0x%lx)\n", 2134 __func__, cmd)); 2135 2136 if (ifa->ifa_addr->sa_family == AF_INET) { 2137 ifp->if_flags |= IFF_UP; 2138 if 
(!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2139 QLNX_LOCK(ha); 2140 qlnx_init_locked(ha); 2141 QLNX_UNLOCK(ha); 2142 } 2143 QL_DPRINT4(ha, (ha->pci_dev, 2144 "%s: SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2145 __func__, cmd, 2146 ntohl(IA_SIN(ifa)->sin_addr.s_addr))); 2147 2148 arp_ifinit(ifp, ifa); 2149 } else { 2150 ether_ioctl(ifp, cmd, data); 2151 } 2152 break; 2153 2154 case SIOCSIFMTU: 2155 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFMTU (0x%lx)\n", 2156 __func__, cmd)); 2157 2158 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2159 ret = EINVAL; 2160 } else { 2161 QLNX_LOCK(ha); 2162 ifp->if_mtu = ifr->ifr_mtu; 2163 ha->max_frame_size = 2164 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2165 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2166 qlnx_init_locked(ha); 2167 } 2168 2169 QLNX_UNLOCK(ha); 2170 2171 if (ret) 2172 ret = EINVAL; 2173 } 2174 2175 break; 2176 2177 case SIOCSIFFLAGS: 2178 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFFLAGS (0x%lx)\n", 2179 __func__, cmd)); 2180 2181 QLNX_LOCK(ha); 2182 2183 if (ifp->if_flags & IFF_UP) { 2184 if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2185 if ((ifp->if_flags ^ ha->if_flags) & 2186 IFF_PROMISC) { 2187 ret = qlnx_set_promisc(ha); 2188 } else if ((ifp->if_flags ^ ha->if_flags) & 2189 IFF_ALLMULTI) { 2190 ret = qlnx_set_allmulti(ha); 2191 } 2192 } else { 2193 ha->max_frame_size = ifp->if_mtu + 2194 ETHER_HDR_LEN + ETHER_CRC_LEN; 2195 qlnx_init_locked(ha); 2196 } 2197 } else { 2198 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2199 qlnx_stop(ha); 2200 ha->if_flags = ifp->if_flags; 2201 } 2202 2203 QLNX_UNLOCK(ha); 2204 break; 2205 2206 case SIOCADDMULTI: 2207 QL_DPRINT4(ha, (ha->pci_dev, 2208 "%s: %s (0x%lx)\n", __func__, "SIOCADDMULTI", cmd)); 2209 2210 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2211 if (qlnx_set_multi(ha, 1)) 2212 ret = EINVAL; 2213 } 2214 break; 2215 2216 case SIOCDELMULTI: 2217 QL_DPRINT4(ha, (ha->pci_dev, 2218 "%s: %s (0x%lx)\n", __func__, "SIOCDELMULTI", cmd)); 2219 2220 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2221 if (qlnx_set_multi(ha, 0)) 2222 ret = EINVAL; 2223 } 2224 break; 2225 2226 case SIOCSIFMEDIA: 2227 case SIOCGIFMEDIA: 2228 QL_DPRINT4(ha, (ha->pci_dev, 2229 "%s: SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", 2230 __func__, cmd)); 2231 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2232 break; 2233 2234 case SIOCSIFCAP: 2235 2236 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2237 2238 QL_DPRINT4(ha, (ha->pci_dev, "%s: SIOCSIFCAP (0x%lx)\n", 2239 __func__, cmd)); 2240 2241 if (mask & IFCAP_HWCSUM) 2242 ifp->if_capenable ^= IFCAP_HWCSUM; 2243 if (mask & IFCAP_TSO4) 2244 ifp->if_capenable ^= IFCAP_TSO4; 2245 if (mask & IFCAP_TSO6) 2246 ifp->if_capenable ^= IFCAP_TSO6; 2247 if (mask & IFCAP_VLAN_HWTAGGING) 2248 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2249 if (mask & IFCAP_VLAN_HWTSO) 2250 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2251 if (mask & IFCAP_LRO) 2252 ifp->if_capenable ^= IFCAP_LRO; 2253 2254 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 2255 qlnx_init(ha); 2256 2257 VLAN_CAPABILITIES(ifp); 2258 break; 2259 2260 #if (__FreeBSD_version >= 1100101) 2261 2262 case SIOCGI2C: 2263 { 2264 struct ifi2creq i2c; 2265 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2266 struct ecore_ptt *p_ptt; 2267 2268 ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2269 2270 if (ret) 2271 break; 2272 2273 if ((i2c.len > sizeof (i2c.data)) || 2274 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2275 ret = EINVAL; 2276 break; 2277 } 2278 2279 p_ptt = ecore_ptt_acquire(p_hwfn); 2280 2281 if (!p_ptt) { 2282 QL_DPRINT1(ha, (ha->pci_dev, "%s :" 2283 " 
ecore_ptt_acquire failed\n", __func__)); 2284 ret = -1; 2285 break; 2286 } 2287 2288 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2289 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2290 i2c.len, &i2c.data[0]); 2291 2292 ecore_ptt_release(p_hwfn, p_ptt); 2293 2294 if (ret) { 2295 ret = -1; 2296 break; 2297 } 2298 2299 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2300 2301 QL_DPRINT8(ha, (ha->pci_dev, "SIOCGI2C copyout ret = %d" 2302 " len = %d addr = 0x%02x offset = 0x%04x" 2303 " data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x" 2304 " 0x%02x 0x%02x 0x%02x\n", 2305 ret, i2c.len, i2c.dev_addr, i2c.offset, 2306 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2307 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7])); 2308 break; 2309 } 2310 #endif /* #if (__FreeBSD_version >= 1100101) */ 2311 2312 default: 2313 QL_DPRINT4(ha, (ha->pci_dev, "%s: default (0x%lx)\n", 2314 __func__, cmd)); 2315 ret = ether_ioctl(ifp, cmd, data); 2316 break; 2317 } 2318 2319 return (ret); 2320 } 2321 2322 static int 2323 qlnx_media_change(struct ifnet *ifp) 2324 { 2325 qlnx_host_t *ha; 2326 struct ifmedia *ifm; 2327 int ret = 0; 2328 2329 ha = (qlnx_host_t *)ifp->if_softc; 2330 2331 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2332 2333 ifm = &ha->media; 2334 2335 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2336 ret = EINVAL; 2337 2338 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2339 2340 return (ret); 2341 } 2342 2343 static void 2344 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2345 { 2346 qlnx_host_t *ha; 2347 2348 ha = (qlnx_host_t *)ifp->if_softc; 2349 2350 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2351 2352 ifmr->ifm_status = IFM_AVALID; 2353 ifmr->ifm_active = IFM_ETHER; 2354 2355 if (ha->link_up) { 2356 ifmr->ifm_status |= IFM_ACTIVE; 2357 ifmr->ifm_active |= 2358 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2359 2360 if (ha->if_link.link_partner_caps & 2361 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2362 ifmr->ifm_active |= 2363 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2364 } 2365 2366 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit (%s)\n", __func__, 2367 (ha->link_up ? 
"link_up" : "link_down"))); 2368 2369 return; 2370 } 2371 2372 2373 static void 2374 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2375 struct qlnx_tx_queue *txq) 2376 { 2377 u16 idx; 2378 struct mbuf *mp; 2379 bus_dmamap_t map; 2380 int i; 2381 struct eth_tx_bd *tx_data_bd; 2382 struct eth_tx_1st_bd *first_bd; 2383 int nbds = 0; 2384 2385 idx = txq->sw_tx_cons; 2386 mp = txq->sw_tx_ring[idx].mp; 2387 map = txq->sw_tx_ring[idx].map; 2388 2389 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2390 2391 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2392 2393 QL_DPRINT1(ha, (ha->pci_dev, "%s: (mp == NULL) " 2394 " tx_idx = 0x%x" 2395 " ecore_prod_idx = 0x%x" 2396 " ecore_cons_idx = 0x%x" 2397 " hw_bd_cons = 0x%x" 2398 " txq_db_last = 0x%x" 2399 " elem_left = 0x%x\n", 2400 __func__, 2401 fp->rss_id, 2402 ecore_chain_get_prod_idx(&txq->tx_pbl), 2403 ecore_chain_get_cons_idx(&txq->tx_pbl), 2404 le16toh(*txq->hw_cons_ptr), 2405 txq->tx_db.raw, 2406 ecore_chain_get_elem_left(&txq->tx_pbl))); 2407 2408 fp->err_tx_free_pkt_null++; 2409 2410 //DEBUG 2411 qlnx_trigger_dump(ha); 2412 2413 return; 2414 } else { 2415 2416 QLNX_INC_OPACKETS((ha->ifp)); 2417 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2418 2419 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2420 bus_dmamap_unload(ha->tx_tag, map); 2421 2422 fp->tx_pkts_freed++; 2423 fp->tx_pkts_completed++; 2424 2425 m_freem(mp); 2426 } 2427 2428 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2429 nbds = first_bd->data.nbds; 2430 2431 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2432 2433 for (i = 1; i < nbds; i++) { 2434 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 2435 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2436 } 2437 txq->sw_tx_ring[idx].flags = 0; 2438 txq->sw_tx_ring[idx].mp = NULL; 2439 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2440 2441 return; 2442 } 2443 2444 static void 2445 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2446 struct qlnx_tx_queue *txq) 2447 { 2448 u16 hw_bd_cons; 2449 u16 ecore_cons_idx; 2450 uint16_t diff; 2451 2452 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2453 2454 while (hw_bd_cons != 2455 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2456 2457 if (hw_bd_cons < ecore_cons_idx) { 2458 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2459 } else { 2460 diff = hw_bd_cons - ecore_cons_idx; 2461 } 2462 if ((diff > TX_RING_SIZE) || 2463 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2464 2465 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2466 2467 QL_DPRINT1(ha, (ha->pci_dev, "%s: (diff = 0x%x) " 2468 " tx_idx = 0x%x" 2469 " ecore_prod_idx = 0x%x" 2470 " ecore_cons_idx = 0x%x" 2471 " hw_bd_cons = 0x%x" 2472 " txq_db_last = 0x%x" 2473 " elem_left = 0x%x\n", 2474 __func__, diff, 2475 fp->rss_id, 2476 ecore_chain_get_prod_idx(&txq->tx_pbl), 2477 ecore_chain_get_cons_idx(&txq->tx_pbl), 2478 le16toh(*txq->hw_cons_ptr), 2479 txq->tx_db.raw, 2480 ecore_chain_get_elem_left(&txq->tx_pbl))); 2481 2482 fp->err_tx_cons_idx_conflict++; 2483 2484 //DEBUG 2485 qlnx_trigger_dump(ha); 2486 } 2487 2488 qlnx_free_tx_pkt(ha, fp, txq); 2489 2490 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2491 } 2492 return; 2493 } 2494 2495 static int 2496 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 2497 { 2498 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 2499 struct qlnx_fastpath *fp; 2500 int rss_id = 0, ret = 0; 2501 2502 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2503 2504 #if __FreeBSD_version >= 1100000 2505 
if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 2506 #else 2507 if (mp->m_flags & M_FLOWID) 2508 #endif 2509 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 2510 ha->num_rss; 2511 2512 fp = &ha->fp_array[rss_id]; 2513 2514 if (fp->tx_br == NULL) { 2515 ret = EINVAL; 2516 goto qlnx_transmit_exit; 2517 } 2518 2519 if (mp != NULL) { 2520 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2521 } 2522 2523 if (fp->fp_taskqueue != NULL) 2524 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 2525 2526 ret = 0; 2527 2528 qlnx_transmit_exit: 2529 2530 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit ret = %d\n", __func__, ret)); 2531 return ret; 2532 } 2533 2534 static void 2535 qlnx_qflush(struct ifnet *ifp) 2536 { 2537 int rss_id; 2538 struct qlnx_fastpath *fp; 2539 struct mbuf *mp; 2540 qlnx_host_t *ha; 2541 2542 ha = (qlnx_host_t *)ifp->if_softc; 2543 2544 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2545 2546 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 2547 2548 fp = &ha->fp_array[rss_id]; 2549 2550 if (fp == NULL) 2551 continue; 2552 2553 if (fp->tx_br) { 2554 mtx_lock(&fp->tx_mtx); 2555 2556 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 2557 fp->tx_pkts_freed++; 2558 m_freem(mp); 2559 } 2560 mtx_unlock(&fp->tx_mtx); 2561 } 2562 } 2563 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 2564 2565 return; 2566 } 2567 2568 static void 2569 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 2570 { 2571 struct ecore_dev *cdev; 2572 uint32_t offset; 2573 2574 cdev = &ha->cdev; 2575 2576 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells); 2577 2578 bus_write_4(ha->pci_dbells, offset, value); 2579 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 2580 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 2581 2582 return; 2583 } 2584 2585 static uint32_t 2586 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 2587 { 2588 struct ether_vlan_header *eh = NULL; 2589 struct ip *ip = NULL; 2590 struct ip6_hdr *ip6 = NULL; 2591 struct tcphdr *th = NULL; 2592 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 2593 uint16_t etype = 0; 2594 device_t dev; 2595 uint8_t buf[sizeof(struct ip6_hdr)]; 2596 2597 dev = ha->pci_dev; 2598 2599 eh = mtod(mp, struct ether_vlan_header *); 2600 2601 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2602 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2603 etype = ntohs(eh->evl_proto); 2604 } else { 2605 ehdrlen = ETHER_HDR_LEN; 2606 etype = ntohs(eh->evl_encap_proto); 2607 } 2608 2609 switch (etype) { 2610 2611 case ETHERTYPE_IP: 2612 ip = (struct ip *)(mp->m_data + ehdrlen); 2613 2614 ip_hlen = sizeof (struct ip); 2615 2616 if (mp->m_len < (ehdrlen + ip_hlen)) { 2617 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 2618 ip = (struct ip *)buf; 2619 } 2620 2621 th = (struct tcphdr *)(ip + 1); 2622 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2623 break; 2624 2625 case ETHERTYPE_IPV6: 2626 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2627 2628 ip_hlen = sizeof(struct ip6_hdr); 2629 2630 if (mp->m_len < (ehdrlen + ip_hlen)) { 2631 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 2632 buf); 2633 ip6 = (struct ip6_hdr *)buf; 2634 } 2635 th = (struct tcphdr *)(ip6 + 1); 2636 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2637 break; 2638 2639 default: 2640 break; 2641 } 2642 2643 return (offset); 2644 } 2645 2646 static __inline int 2647 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 2648 uint32_t offset) 2649 { 2650 int i; 2651 uint32_t sum, nbds_in_hdr = 1; 2652 
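	/*
	 * Check that each successive window of ETH_TX_LSO_WINDOW_BDS_NUM BDs
	 * (TCP/IP header BDs included) carries at least
	 * ETH_TX_LSO_WINDOW_MIN_LEN bytes of payload; a non-zero return makes
	 * the caller defragment the mbuf chain before retrying.
	 */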
bus_dma_segment_t *t_segs = segs; 2653 2654 /* count the number of segments spanned by TCP header */ 2655 2656 i = 0; 2657 while ((i < nsegs) && (offset > t_segs->ds_len)) { 2658 nbds_in_hdr++; 2659 offset = offset - t_segs->ds_len; 2660 t_segs++; 2661 i++; 2662 } 2663 2664 while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) { 2665 2666 sum = 0; 2667 2668 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){ 2669 sum += segs->ds_len; 2670 segs++; 2671 } 2672 2673 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 2674 fp->tx_lso_wnd_min_len++; 2675 return (-1); 2676 } 2677 2678 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO; 2679 } 2680 2681 return (0); 2682 } 2683 2684 static int 2685 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 2686 { 2687 bus_dma_segment_t *segs; 2688 bus_dmamap_t map = 0; 2689 uint32_t nsegs = 0; 2690 int ret = -1; 2691 struct mbuf *m_head = *m_headp; 2692 uint16_t idx = 0; 2693 uint16_t elem_left; 2694 2695 uint8_t nbd = 0; 2696 struct qlnx_tx_queue *txq; 2697 2698 struct eth_tx_1st_bd *first_bd; 2699 struct eth_tx_2nd_bd *second_bd; 2700 struct eth_tx_3rd_bd *third_bd; 2701 struct eth_tx_bd *tx_data_bd; 2702 2703 int seg_idx = 0; 2704 uint32_t nbds_in_hdr = 0; 2705 uint32_t offset = 0; 2706 2707 QL_DPRINT8(ha, (ha->pci_dev, "%s: enter\n", __func__)); 2708 2709 if (!ha->link_up) 2710 return (-1); 2711 2712 first_bd = NULL; 2713 second_bd = NULL; 2714 third_bd = NULL; 2715 tx_data_bd = NULL; 2716 2717 txq = fp->txq[0]; 2718 idx = txq->sw_tx_prod; 2719 2720 map = txq->sw_tx_ring[idx].map; 2721 segs = txq->segs; 2722 2723 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 2724 BUS_DMA_NOWAIT); 2725 2726 #ifdef QLNX_TRACE_TSO_PKT_LEN 2727 2728 if (!fp->tx_tso_min_pkt_len) { 2729 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2730 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2731 } else { 2732 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 2733 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2734 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 2735 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len; 2736 } 2737 2738 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */ 2739 2740 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 2741 offset = qlnx_tcp_offset(ha, m_head); 2742 2743 if ((ret == EFBIG) || 2744 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 2745 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 2746 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 2747 qlnx_tso_check(fp, segs, nsegs, offset))))) { 2748 2749 struct mbuf *m; 2750 2751 QL_DPRINT8(ha, (ha->pci_dev, "%s: EFBIG [%d]\n", __func__, 2752 m_head->m_pkthdr.len)); 2753 2754 fp->tx_defrag++; 2755 2756 m = m_defrag(m_head, M_NOWAIT); 2757 if (m == NULL) { 2758 fp->err_tx_defrag++; 2759 fp->tx_pkts_freed++; 2760 m_freem(m_head); 2761 *m_headp = NULL; 2762 QL_DPRINT1(ha, (ha->pci_dev, 2763 "%s: m_defrag() = NULL [%d]\n", 2764 __func__, ret)); 2765 return (ENOBUFS); 2766 } 2767 2768 m_head = m; 2769 *m_headp = m_head; 2770 2771 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 2772 segs, &nsegs, BUS_DMA_NOWAIT))) { 2773 2774 fp->err_tx_defrag_dmamap_load++; 2775 2776 QL_DPRINT1(ha, (ha->pci_dev, 2777 "%s: bus_dmamap_load_mbuf_sg failed0[%d, %d]\n", 2778 __func__, ret, m_head->m_pkthdr.len)); 2779 2780 fp->tx_pkts_freed++; 2781 m_freem(m_head); 2782 *m_headp = NULL; 2783 2784 return (ret); 2785 } 2786 2787 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 2788 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 2789 2790 fp->err_tx_non_tso_max_seg++; 2791 2792 QL_DPRINT1(ha, (ha->pci_dev, 2793 "%s: (%d) nsegs too 
many for non-TSO[%d, %d]\n", 2794 __func__, ret, nsegs, m_head->m_pkthdr.len)); 2795 2796 fp->tx_pkts_freed++; 2797 m_freem(m_head); 2798 *m_headp = NULL; 2799 2800 return (ret); 2801 } 2802 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 2803 offset = qlnx_tcp_offset(ha, m_head); 2804 2805 } else if (ret) { 2806 2807 fp->err_tx_dmamap_load++; 2808 2809 QL_DPRINT1(ha, (ha->pci_dev, 2810 "%s: bus_dmamap_load_mbuf_sg failed1[%d, %d]\n", 2811 __func__, ret, m_head->m_pkthdr.len)); 2812 2813 fp->tx_pkts_freed++; 2814 m_freem(m_head); 2815 *m_headp = NULL; 2816 return (ret); 2817 } 2818 2819 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 2820 2821 #ifdef QLNX_TRACE_TSO_PKT_LEN 2822 2823 if (nsegs < QLNX_FP_MAX_SEGS) 2824 fp->tx_pkts[(nsegs - 1)]++; 2825 else 2826 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 2827 2828 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */ 2829 2830 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 2831 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 2832 2833 QL_DPRINT1(ha, (ha->pci_dev, "%s: (%d, 0x%x) insuffient BDs" 2834 "in chain[%d] trying to free packets\n", 2835 __func__, nsegs, elem_left, fp->rss_id)); 2836 2837 fp->tx_nsegs_gt_elem_left++; 2838 2839 (void)qlnx_tx_int(ha, fp, txq); 2840 2841 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 2842 ecore_chain_get_elem_left(&txq->tx_pbl))) { 2843 2844 QL_DPRINT1(ha, (ha->pci_dev, 2845 "%s: (%d, 0x%x) insuffient BDs in chain[%d]\n", 2846 __func__, nsegs, elem_left, fp->rss_id)); 2847 2848 fp->err_tx_nsegs_gt_elem_left++; 2849 ha->storm_stats_enable = 1; 2850 return (ENOBUFS); 2851 } 2852 } 2853 2854 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 2855 2856 txq->sw_tx_ring[idx].mp = m_head; 2857 2858 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 2859 2860 memset(first_bd, 0, sizeof(*first_bd)); 2861 2862 first_bd->data.bd_flags.bitfields = 2863 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 2864 2865 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 2866 2867 nbd++; 2868 2869 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 2870 first_bd->data.bd_flags.bitfields |= 2871 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2872 } 2873 2874 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) { 2875 first_bd->data.bd_flags.bitfields |= 2876 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 2877 } 2878 2879 if (m_head->m_flags & M_VLANTAG) { 2880 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 2881 first_bd->data.bd_flags.bitfields |= 2882 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 2883 } 2884 2885 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2886 2887 first_bd->data.bd_flags.bitfields |= 2888 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 2889 first_bd->data.bd_flags.bitfields |= 2890 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2891 2892 nbds_in_hdr = 1; 2893 2894 if (offset == segs->ds_len) { 2895 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 2896 segs++; 2897 seg_idx++; 2898 2899 second_bd = (struct eth_tx_2nd_bd *) 2900 ecore_chain_produce(&txq->tx_pbl); 2901 memset(second_bd, 0, sizeof(*second_bd)); 2902 nbd++; 2903 2904 if (seg_idx < nsegs) { 2905 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 2906 (segs->ds_addr), (segs->ds_len)); 2907 segs++; 2908 seg_idx++; 2909 } 2910 2911 third_bd = (struct eth_tx_3rd_bd *) 2912 ecore_chain_produce(&txq->tx_pbl); 2913 memset(third_bd, 0, sizeof(*third_bd)); 2914 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 2915 third_bd->data.bitfields |= 2916 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 2917 nbd++; 2918 2919 if (seg_idx < nsegs) { 2920 
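				/* the third BD picks up the next payload segment, if one remains */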
BD_SET_UNMAP_ADDR_LEN(third_bd, \ 2921 (segs->ds_addr), (segs->ds_len)); 2922 segs++; 2923 seg_idx++; 2924 } 2925 2926 for (; seg_idx < nsegs; seg_idx++) { 2927 tx_data_bd = (struct eth_tx_bd *) 2928 ecore_chain_produce(&txq->tx_pbl); 2929 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 2930 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 2931 segs->ds_addr,\ 2932 segs->ds_len); 2933 segs++; 2934 nbd++; 2935 } 2936 2937 } else if (offset < segs->ds_len) { 2938 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 2939 2940 second_bd = (struct eth_tx_2nd_bd *) 2941 ecore_chain_produce(&txq->tx_pbl); 2942 memset(second_bd, 0, sizeof(*second_bd)); 2943 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 2944 (segs->ds_addr + offset),\ 2945 (segs->ds_len - offset)); 2946 nbd++; 2947 segs++; 2948 2949 third_bd = (struct eth_tx_3rd_bd *) 2950 ecore_chain_produce(&txq->tx_pbl); 2951 memset(third_bd, 0, sizeof(*third_bd)); 2952 2953 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 2954 segs->ds_addr,\ 2955 segs->ds_len); 2956 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 2957 third_bd->data.bitfields |= 2958 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 2959 segs++; 2960 nbd++; 2961 2962 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 2963 tx_data_bd = (struct eth_tx_bd *) 2964 ecore_chain_produce(&txq->tx_pbl); 2965 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 2966 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 2967 segs->ds_addr,\ 2968 segs->ds_len); 2969 segs++; 2970 nbd++; 2971 } 2972 2973 } else { 2974 offset = offset - segs->ds_len; 2975 segs++; 2976 2977 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 2978 2979 if (offset) 2980 nbds_in_hdr++; 2981 2982 tx_data_bd = (struct eth_tx_bd *) 2983 ecore_chain_produce(&txq->tx_pbl); 2984 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 2985 2986 if (second_bd == NULL) { 2987 second_bd = (struct eth_tx_2nd_bd *) 2988 tx_data_bd; 2989 } else if (third_bd == NULL) { 2990 third_bd = (struct eth_tx_3rd_bd *) 2991 tx_data_bd; 2992 } 2993 2994 if (offset && (offset < segs->ds_len)) { 2995 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 2996 segs->ds_addr, offset); 2997 2998 tx_data_bd = (struct eth_tx_bd *) 2999 ecore_chain_produce(&txq->tx_pbl); 3000 3001 memset(tx_data_bd, 0, 3002 sizeof(*tx_data_bd)); 3003 3004 if (second_bd == NULL) { 3005 second_bd = 3006 (struct eth_tx_2nd_bd *)tx_data_bd; 3007 } else if (third_bd == NULL) { 3008 third_bd = 3009 (struct eth_tx_3rd_bd *)tx_data_bd; 3010 } 3011 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3012 (segs->ds_addr + offset), \ 3013 (segs->ds_len - offset)); 3014 nbd++; 3015 offset = 0; 3016 } else { 3017 if (offset) 3018 offset = offset - segs->ds_len; 3019 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3020 segs->ds_addr, segs->ds_len); 3021 } 3022 segs++; 3023 nbd++; 3024 } 3025 3026 if (third_bd == NULL) { 3027 third_bd = (struct eth_tx_3rd_bd *) 3028 ecore_chain_produce(&txq->tx_pbl); 3029 memset(third_bd, 0, sizeof(*third_bd)); 3030 } 3031 3032 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3033 third_bd->data.bitfields |= 3034 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3035 } 3036 } else { 3037 segs++; 3038 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3039 tx_data_bd = (struct eth_tx_bd *) 3040 ecore_chain_produce(&txq->tx_pbl); 3041 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3042 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3043 segs->ds_len); 3044 segs++; 3045 nbd++; 3046 } 3047 first_bd->data.bitfields = 3048 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3049 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3050 first_bd->data.bitfields = 3051 
htole16(first_bd->data.bitfields); 3052 } 3053 3054 3055 first_bd->data.nbds = nbd; 3056 3057 #ifdef QLNX_TRACE_TSO_PKT_LEN 3058 3059 if (fp->tx_tso_max_nsegs < nsegs) 3060 fp->tx_tso_max_nsegs = nsegs; 3061 3062 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3063 fp->tx_tso_min_nsegs = nsegs; 3064 3065 #endif /* #ifdef QLNX_TRACE_TSO_PKT_LEN */ 3066 3067 txq->sw_tx_ring[idx].nsegs = nsegs; 3068 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3069 3070 txq->tx_db.data.bd_prod = 3071 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3072 3073 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3074 3075 QL_DPRINT8(ha, (ha->pci_dev, "%s: exit\n", __func__)); 3076 return (0); 3077 } 3078 3079 static void 3080 qlnx_stop(qlnx_host_t *ha) 3081 { 3082 struct ifnet *ifp = ha->ifp; 3083 device_t dev; 3084 int i; 3085 3086 dev = ha->pci_dev; 3087 3088 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3089 3090 /* 3091 * We simply lock and unlock each fp->tx_mtx to 3092 * propagate the if_drv_flags 3093 * state to each tx thread 3094 */ 3095 if (ha->state == QLNX_STATE_OPEN) { 3096 for (i = 0; i < ha->num_rss; i++) { 3097 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3098 3099 mtx_lock(&fp->tx_mtx); 3100 mtx_unlock(&fp->tx_mtx); 3101 3102 if (fp->fp_taskqueue != NULL) 3103 taskqueue_enqueue(fp->fp_taskqueue, 3104 &fp->fp_task); 3105 } 3106 } 3107 3108 qlnx_unload(ha); 3109 3110 return; 3111 } 3112 3113 static int 3114 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3115 { 3116 return(TX_RING_SIZE - 1); 3117 } 3118 3119 uint8_t * 3120 qlnx_get_mac_addr(qlnx_host_t *ha) 3121 { 3122 struct ecore_hwfn *p_hwfn; 3123 3124 p_hwfn = &ha->cdev.hwfns[0]; 3125 return (p_hwfn->hw_info.hw_mac_addr); 3126 } 3127 3128 static uint32_t 3129 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3130 { 3131 uint32_t ifm_type = 0; 3132 3133 switch (if_link->media_type) { 3134 3135 case MEDIA_MODULE_FIBER: 3136 case MEDIA_UNSPECIFIED: 3137 if (if_link->speed == (100 * 1000)) 3138 ifm_type = QLNX_IFM_100G_SR4; 3139 else if (if_link->speed == (40 * 1000)) 3140 ifm_type = IFM_40G_SR4; 3141 else if (if_link->speed == (25 * 1000)) 3142 ifm_type = QLNX_IFM_25G_SR; 3143 break; 3144 3145 case MEDIA_DA_TWINAX: 3146 if (if_link->speed == (100 * 1000)) 3147 ifm_type = QLNX_IFM_100G_CR4; 3148 else if (if_link->speed == (40 * 1000)) 3149 ifm_type = IFM_40G_CR4; 3150 else if (if_link->speed == (25 * 1000)) 3151 ifm_type = QLNX_IFM_25G_CR; 3152 break; 3153 3154 default : 3155 ifm_type = IFM_UNKNOWN; 3156 break; 3157 } 3158 return (ifm_type); 3159 } 3160 3161 3162 3163 /***************************************************************************** 3164 * Interrupt Service Functions 3165 *****************************************************************************/ 3166 3167 static int 3168 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3169 struct mbuf *mp_head, uint16_t len) 3170 { 3171 struct mbuf *mp, *mpf, *mpl; 3172 struct sw_rx_data *sw_rx_data; 3173 struct qlnx_rx_queue *rxq; 3174 uint16_t len_in_buffer; 3175 3176 rxq = fp->rxq; 3177 mpf = mpl = mp = NULL; 3178 3179 while (len) { 3180 3181 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3182 3183 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3184 mp = sw_rx_data->data; 3185 3186 if (mp == NULL) { 3187 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n", 3188 __func__)); 3189 fp->err_rx_mp_null++; 3190 rxq->sw_rx_cons = 3191 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3192 3193 if (mpf != NULL) 3194 
m_freem(mpf); 3195 3196 return (-1); 3197 } 3198 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3199 BUS_DMASYNC_POSTREAD); 3200 3201 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3202 3203 QL_DPRINT1(ha, (ha->pci_dev, 3204 "%s: New buffer allocation failed, dropping" 3205 " incoming packet and reusing its buffer\n", 3206 __func__)); 3207 3208 qlnx_reuse_rx_data(rxq); 3209 fp->err_rx_alloc_errors++; 3210 3211 if (mpf != NULL) 3212 m_freem(mpf); 3213 3214 return (-1); 3215 } 3216 ecore_chain_consume(&rxq->rx_bd_ring); 3217 3218 if (len > rxq->rx_buf_size) 3219 len_in_buffer = rxq->rx_buf_size; 3220 else 3221 len_in_buffer = len; 3222 3223 len = len - len_in_buffer; 3224 3225 mp->m_flags &= ~M_PKTHDR; 3226 mp->m_next = NULL; 3227 mp->m_len = len_in_buffer; 3228 3229 if (mpf == NULL) 3230 mpf = mpl = mp; 3231 else { 3232 mpl->m_next = mp; 3233 mpl = mp; 3234 } 3235 } 3236 3237 if (mpf != NULL) 3238 mp_head->m_next = mpf; 3239 3240 return (0); 3241 } 3242 3243 static void 3244 qlnx_tpa_start(qlnx_host_t *ha, 3245 struct qlnx_fastpath *fp, 3246 struct qlnx_rx_queue *rxq, 3247 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3248 { 3249 uint32_t agg_index; 3250 struct ifnet *ifp = ha->ifp; 3251 struct mbuf *mp; 3252 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3253 struct sw_rx_data *sw_rx_data; 3254 dma_addr_t addr; 3255 bus_dmamap_t map; 3256 struct eth_rx_bd *rx_bd; 3257 int i; 3258 device_t dev; 3259 #if __FreeBSD_version >= 1100000 3260 uint8_t hash_type; 3261 #endif /* #if __FreeBSD_version >= 1100000 */ 3262 3263 dev = ha->pci_dev; 3264 agg_index = cqe->tpa_agg_index; 3265 3266 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3267 "\t type = 0x%x\n" 3268 "\t bitfields = 0x%x\n" 3269 "\t seg_len = 0x%x\n" 3270 "\t pars_flags = 0x%x\n" 3271 "\t vlan_tag = 0x%x\n" 3272 "\t rss_hash = 0x%x\n" 3273 "\t len_on_first_bd = 0x%x\n" 3274 "\t placement_offset = 0x%x\n" 3275 "\t tpa_agg_index = 0x%x\n" 3276 "\t header_len = 0x%x\n" 3277 "\t ext_bd_len_list[0] = 0x%x\n" 3278 "\t ext_bd_len_list[1] = 0x%x\n" 3279 "\t ext_bd_len_list[2] = 0x%x\n" 3280 "\t ext_bd_len_list[3] = 0x%x\n" 3281 "\t ext_bd_len_list[4] = 0x%x\n", 3282 __func__, fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3283 cqe->pars_flags.flags, cqe->vlan_tag, 3284 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3285 cqe->tpa_agg_index, cqe->header_len, 3286 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3287 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3288 cqe->ext_bd_len_list[4])); 3289 3290 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3291 fp->err_rx_tpa_invalid_agg_num++; 3292 return; 3293 } 3294 3295 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3296 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3297 mp = sw_rx_data->data; 3298 3299 QL_DPRINT7(ha, (dev, "%s[%d]: mp = %p \n ", __func__, fp->rss_id, mp)); 3300 3301 if (mp == NULL) { 3302 QL_DPRINT7(ha, (dev, "%s[%d]: mp = NULL\n", __func__, 3303 fp->rss_id)); 3304 fp->err_rx_mp_null++; 3305 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3306 3307 return; 3308 } 3309 3310 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3311 3312 QL_DPRINT7(ha, (dev, "%s[%d]: CQE in CONS = %u has error," 3313 " flags = %x, dropping incoming packet\n", __func__, 3314 fp->rss_id, rxq->sw_rx_cons, 3315 le16toh(cqe->pars_flags.flags))); 3316 3317 fp->err_rx_hw_errors++; 3318 3319 qlnx_reuse_rx_data(rxq); 3320 3321 QLNX_INC_IERRORS(ifp); 3322 3323 return; 3324 } 3325 3326 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3327 3328 QL_DPRINT7(ha, (dev, "%s[%d]: 
New buffer allocation failed," 3329 " dropping incoming packet and reusing its buffer\n", 3330 __func__, fp->rss_id)); 3331 3332 fp->err_rx_alloc_errors++; 3333 QLNX_INC_IQDROPS(ifp); 3334 3335 /* 3336 * Load the tpa mbuf into the rx ring and save the 3337 * posted mbuf 3338 */ 3339 3340 map = sw_rx_data->map; 3341 addr = sw_rx_data->dma_addr; 3342 3343 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 3344 3345 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 3346 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 3347 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 3348 3349 rxq->tpa_info[agg_index].rx_buf.data = mp; 3350 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 3351 rxq->tpa_info[agg_index].rx_buf.map = map; 3352 3353 rx_bd = (struct eth_rx_bd *) 3354 ecore_chain_produce(&rxq->rx_bd_ring); 3355 3356 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 3357 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 3358 3359 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3360 BUS_DMASYNC_PREREAD); 3361 3362 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 3363 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3364 3365 ecore_chain_consume(&rxq->rx_bd_ring); 3366 3367 /* Now reuse any buffers posted in ext_bd_len_list */ 3368 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3369 3370 if (cqe->ext_bd_len_list[i] == 0) 3371 break; 3372 3373 qlnx_reuse_rx_data(rxq); 3374 } 3375 3376 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3377 return; 3378 } 3379 3380 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3381 3382 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state," 3383 " dropping incoming packet and reusing its buffer\n", 3384 __func__, fp->rss_id)); 3385 3386 QLNX_INC_IQDROPS(ifp); 3387 3388 /* if we already have mbuf head in aggregation free it */ 3389 if (rxq->tpa_info[agg_index].mpf) { 3390 m_freem(rxq->tpa_info[agg_index].mpf); 3391 rxq->tpa_info[agg_index].mpl = NULL; 3392 } 3393 rxq->tpa_info[agg_index].mpf = mp; 3394 rxq->tpa_info[agg_index].mpl = NULL; 3395 3396 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3397 ecore_chain_consume(&rxq->rx_bd_ring); 3398 3399 /* Now reuse any buffers posted in ext_bd_len_list */ 3400 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3401 3402 if (cqe->ext_bd_len_list[i] == 0) 3403 break; 3404 3405 qlnx_reuse_rx_data(rxq); 3406 } 3407 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3408 3409 return; 3410 } 3411 3412 /* 3413 * first process the ext_bd_len_list 3414 * if this fails then we simply drop the packet 3415 */ 3416 ecore_chain_consume(&rxq->rx_bd_ring); 3417 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3418 3419 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3420 3421 QL_DPRINT7(ha, (dev, "%s[%d]: 4\n ", __func__, fp->rss_id)); 3422 3423 if (cqe->ext_bd_len_list[i] == 0) 3424 break; 3425 3426 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3427 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3428 BUS_DMASYNC_POSTREAD); 3429 3430 mpc = sw_rx_data->data; 3431 3432 if (mpc == NULL) { 3433 QL_DPRINT7(ha, (ha->pci_dev, "%s[%d]: mpc = NULL\n", 3434 __func__, fp->rss_id)); 3435 fp->err_rx_mp_null++; 3436 if (mpf != NULL) 3437 m_freem(mpf); 3438 mpf = mpl = NULL; 3439 rxq->tpa_info[agg_index].agg_state = 3440 QLNX_AGG_STATE_ERROR; 3441 ecore_chain_consume(&rxq->rx_bd_ring); 3442 rxq->sw_rx_cons = 3443 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3444 continue; 3445 } 3446 3447 if 
(qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3448 QL_DPRINT7(ha, (dev, 3449 "%s[%d]: New buffer allocation failed, dropping" 3450 " incoming packet and reusing its buffer\n", 3451 __func__, fp->rss_id)); 3452 3453 qlnx_reuse_rx_data(rxq); 3454 3455 if (mpf != NULL) 3456 m_freem(mpf); 3457 mpf = mpl = NULL; 3458 3459 rxq->tpa_info[agg_index].agg_state = 3460 QLNX_AGG_STATE_ERROR; 3461 3462 ecore_chain_consume(&rxq->rx_bd_ring); 3463 rxq->sw_rx_cons = 3464 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3465 3466 continue; 3467 } 3468 3469 mpc->m_flags &= ~M_PKTHDR; 3470 mpc->m_next = NULL; 3471 mpc->m_len = cqe->ext_bd_len_list[i]; 3472 3473 3474 if (mpf == NULL) { 3475 mpf = mpl = mpc; 3476 } else { 3477 mpl->m_len = ha->rx_buf_size; 3478 mpl->m_next = mpc; 3479 mpl = mpc; 3480 } 3481 3482 ecore_chain_consume(&rxq->rx_bd_ring); 3483 rxq->sw_rx_cons = 3484 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3485 } 3486 3487 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3488 3489 QL_DPRINT7(ha, (dev, "%s[%d]: invalid aggregation state," 3490 " dropping incoming packet and reusing its buffer\n", 3491 __func__, fp->rss_id)); 3492 3493 QLNX_INC_IQDROPS(ifp); 3494 3495 rxq->tpa_info[agg_index].mpf = mp; 3496 rxq->tpa_info[agg_index].mpl = NULL; 3497 3498 return; 3499 } 3500 3501 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 3502 3503 if (mpf != NULL) { 3504 mp->m_len = ha->rx_buf_size; 3505 mp->m_next = mpf; 3506 rxq->tpa_info[agg_index].mpf = mp; 3507 rxq->tpa_info[agg_index].mpl = mpl; 3508 } else { 3509 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 3510 rxq->tpa_info[agg_index].mpf = mp; 3511 rxq->tpa_info[agg_index].mpl = mp; 3512 mp->m_next = NULL; 3513 } 3514 3515 mp->m_flags |= M_PKTHDR; 3516 3517 /* assign packet to this interface interface */ 3518 mp->m_pkthdr.rcvif = ifp; 3519 3520 /* assume no hardware checksum has complated */ 3521 mp->m_pkthdr.csum_flags = 0; 3522 3523 //mp->m_pkthdr.flowid = fp->rss_id; 3524 mp->m_pkthdr.flowid = cqe->rss_hash; 3525 3526 #if __FreeBSD_version >= 1100000 3527 3528 hash_type = cqe->bitfields & 3529 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 3530 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 3531 3532 switch (hash_type) { 3533 3534 case RSS_HASH_TYPE_IPV4: 3535 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 3536 break; 3537 3538 case RSS_HASH_TYPE_TCP_IPV4: 3539 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 3540 break; 3541 3542 case RSS_HASH_TYPE_IPV6: 3543 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 3544 break; 3545 3546 case RSS_HASH_TYPE_TCP_IPV6: 3547 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 3548 break; 3549 3550 default: 3551 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 3552 break; 3553 } 3554 3555 #else 3556 mp->m_flags |= M_FLOWID; 3557 #endif 3558 3559 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 3560 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 3561 3562 mp->m_pkthdr.csum_data = 0xFFFF; 3563 3564 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 3565 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 3566 mp->m_flags |= M_VLANTAG; 3567 } 3568 3569 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 3570 3571 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n" "\tagg_state = %d\n" 3572 "\t mpf = %p mpl = %p\n", __func__, fp->rss_id, 3573 rxq->tpa_info[agg_index].agg_state, 3574 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl)); 3575 3576 return; 3577 } 3578 3579 static void 3580 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3581 struct qlnx_rx_queue *rxq, 3582 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 
3583 { 3584 struct sw_rx_data *sw_rx_data; 3585 int i; 3586 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3587 struct mbuf *mp; 3588 uint32_t agg_index; 3589 device_t dev; 3590 3591 dev = ha->pci_dev; 3592 3593 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3594 "\t type = 0x%x\n" 3595 "\t tpa_agg_index = 0x%x\n" 3596 "\t len_list[0] = 0x%x\n" 3597 "\t len_list[1] = 0x%x\n" 3598 "\t len_list[2] = 0x%x\n" 3599 "\t len_list[3] = 0x%x\n" 3600 "\t len_list[4] = 0x%x\n" 3601 "\t len_list[5] = 0x%x\n", 3602 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index, 3603 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3604 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5])); 3605 3606 agg_index = cqe->tpa_agg_index; 3607 3608 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3609 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id)); 3610 fp->err_rx_tpa_invalid_agg_num++; 3611 return; 3612 } 3613 3614 3615 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 3616 3617 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id)); 3618 3619 if (cqe->len_list[i] == 0) 3620 break; 3621 3622 if (rxq->tpa_info[agg_index].agg_state != 3623 QLNX_AGG_STATE_START) { 3624 qlnx_reuse_rx_data(rxq); 3625 continue; 3626 } 3627 3628 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3629 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3630 BUS_DMASYNC_POSTREAD); 3631 3632 mpc = sw_rx_data->data; 3633 3634 if (mpc == NULL) { 3635 3636 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n", 3637 __func__, fp->rss_id)); 3638 3639 fp->err_rx_mp_null++; 3640 if (mpf != NULL) 3641 m_freem(mpf); 3642 mpf = mpl = NULL; 3643 rxq->tpa_info[agg_index].agg_state = 3644 QLNX_AGG_STATE_ERROR; 3645 ecore_chain_consume(&rxq->rx_bd_ring); 3646 rxq->sw_rx_cons = 3647 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3648 continue; 3649 } 3650 3651 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3652 3653 QL_DPRINT7(ha, (dev, 3654 "%s[%d]: New buffer allocation failed, dropping" 3655 " incoming packet and reusing its buffer\n", 3656 __func__, fp->rss_id)); 3657 3658 qlnx_reuse_rx_data(rxq); 3659 3660 if (mpf != NULL) 3661 m_freem(mpf); 3662 mpf = mpl = NULL; 3663 3664 rxq->tpa_info[agg_index].agg_state = 3665 QLNX_AGG_STATE_ERROR; 3666 3667 ecore_chain_consume(&rxq->rx_bd_ring); 3668 rxq->sw_rx_cons = 3669 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3670 3671 continue; 3672 } 3673 3674 mpc->m_flags &= ~M_PKTHDR; 3675 mpc->m_next = NULL; 3676 mpc->m_len = cqe->len_list[i]; 3677 3678 3679 if (mpf == NULL) { 3680 mpf = mpl = mpc; 3681 } else { 3682 mpl->m_len = ha->rx_buf_size; 3683 mpl->m_next = mpc; 3684 mpl = mpc; 3685 } 3686 3687 ecore_chain_consume(&rxq->rx_bd_ring); 3688 rxq->sw_rx_cons = 3689 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3690 } 3691 3692 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n" "\tmpf = %p mpl = %p\n", 3693 __func__, fp->rss_id, mpf, mpl)); 3694 3695 if (mpf != NULL) { 3696 mp = rxq->tpa_info[agg_index].mpl; 3697 mp->m_len = ha->rx_buf_size; 3698 mp->m_next = mpf; 3699 rxq->tpa_info[agg_index].mpl = mpl; 3700 } 3701 3702 return; 3703 } 3704 3705 static int 3706 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3707 struct qlnx_rx_queue *rxq, 3708 struct eth_fast_path_rx_tpa_end_cqe *cqe) 3709 { 3710 struct sw_rx_data *sw_rx_data; 3711 int i; 3712 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3713 struct mbuf *mp; 3714 uint32_t agg_index; 3715 uint32_t len = 0; 3716 struct ifnet *ifp = ha->ifp; 3717 device_t dev; 3718 3719 dev = ha->pci_dev; 3720 3721 QL_DPRINT7(ha, (dev, "%s[%d]: enter\n " 3722 "\t type = 0x%x\n" 3723 "\t tpa_agg_index 
= 0x%x\n" 3724 "\t total_packet_len = 0x%x\n" 3725 "\t num_of_bds = 0x%x\n" 3726 "\t end_reason = 0x%x\n" 3727 "\t num_of_coalesced_segs = 0x%x\n" 3728 "\t ts_delta = 0x%x\n" 3729 "\t len_list[0] = 0x%x\n" 3730 "\t len_list[1] = 0x%x\n" 3731 "\t len_list[2] = 0x%x\n" 3732 "\t len_list[3] = 0x%x\n", 3733 __func__, fp->rss_id, cqe->type, cqe->tpa_agg_index, 3734 cqe->total_packet_len, cqe->num_of_bds, 3735 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 3736 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3737 cqe->len_list[3])); 3738 3739 agg_index = cqe->tpa_agg_index; 3740 3741 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3742 3743 QL_DPRINT7(ha, (dev, "%s[%d]: 0\n ", __func__, fp->rss_id)); 3744 3745 fp->err_rx_tpa_invalid_agg_num++; 3746 return (0); 3747 } 3748 3749 3750 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 3751 3752 QL_DPRINT7(ha, (dev, "%s[%d]: 1\n ", __func__, fp->rss_id)); 3753 3754 if (cqe->len_list[i] == 0) 3755 break; 3756 3757 if (rxq->tpa_info[agg_index].agg_state != 3758 QLNX_AGG_STATE_START) { 3759 3760 QL_DPRINT7(ha, (dev, "%s[%d]: 2\n ", __func__, 3761 fp->rss_id)); 3762 3763 qlnx_reuse_rx_data(rxq); 3764 continue; 3765 } 3766 3767 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3768 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3769 BUS_DMASYNC_POSTREAD); 3770 3771 mpc = sw_rx_data->data; 3772 3773 if (mpc == NULL) { 3774 3775 QL_DPRINT7(ha, (dev, "%s[%d]: mpc = NULL\n", 3776 __func__, fp->rss_id)); 3777 3778 fp->err_rx_mp_null++; 3779 if (mpf != NULL) 3780 m_freem(mpf); 3781 mpf = mpl = NULL; 3782 rxq->tpa_info[agg_index].agg_state = 3783 QLNX_AGG_STATE_ERROR; 3784 ecore_chain_consume(&rxq->rx_bd_ring); 3785 rxq->sw_rx_cons = 3786 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3787 continue; 3788 } 3789 3790 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3791 QL_DPRINT7(ha, (dev, 3792 "%s[%d]: New buffer allocation failed, dropping" 3793 " incoming packet and reusing its buffer\n", 3794 __func__, fp->rss_id)); 3795 3796 qlnx_reuse_rx_data(rxq); 3797 3798 if (mpf != NULL) 3799 m_freem(mpf); 3800 mpf = mpl = NULL; 3801 3802 rxq->tpa_info[agg_index].agg_state = 3803 QLNX_AGG_STATE_ERROR; 3804 3805 ecore_chain_consume(&rxq->rx_bd_ring); 3806 rxq->sw_rx_cons = 3807 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3808 3809 continue; 3810 } 3811 3812 mpc->m_flags &= ~M_PKTHDR; 3813 mpc->m_next = NULL; 3814 mpc->m_len = cqe->len_list[i]; 3815 3816 3817 if (mpf == NULL) { 3818 mpf = mpl = mpc; 3819 } else { 3820 mpl->m_len = ha->rx_buf_size; 3821 mpl->m_next = mpc; 3822 mpl = mpc; 3823 } 3824 3825 ecore_chain_consume(&rxq->rx_bd_ring); 3826 rxq->sw_rx_cons = 3827 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3828 } 3829 3830 QL_DPRINT7(ha, (dev, "%s[%d]: 5\n ", __func__, fp->rss_id)); 3831 3832 if (mpf != NULL) { 3833 3834 QL_DPRINT7(ha, (dev, "%s[%d]: 6\n ", __func__, fp->rss_id)); 3835 3836 mp = rxq->tpa_info[agg_index].mpl; 3837 mp->m_len = ha->rx_buf_size; 3838 mp->m_next = mpf; 3839 } 3840 3841 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 3842 3843 QL_DPRINT7(ha, (dev, "%s[%d]: 7\n ", __func__, fp->rss_id)); 3844 3845 if (rxq->tpa_info[agg_index].mpf != NULL) 3846 m_freem(rxq->tpa_info[agg_index].mpf); 3847 rxq->tpa_info[agg_index].mpf = NULL; 3848 rxq->tpa_info[agg_index].mpl = NULL; 3849 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3850 return (0); 3851 } 3852 3853 mp = rxq->tpa_info[agg_index].mpf; 3854 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 3855 mp->m_pkthdr.len = cqe->total_packet_len; 3856 3857 if 
(mp->m_next == NULL) 3858 mp->m_len = mp->m_pkthdr.len; 3859 else { 3860 /* compute the total packet length */ 3861 mpf = mp; 3862 while (mpf != NULL) { 3863 len += mpf->m_len; 3864 mpf = mpf->m_next; 3865 } 3866 3867 if (cqe->total_packet_len > len) { 3868 mpl = rxq->tpa_info[agg_index].mpl; 3869 mpl->m_len += (cqe->total_packet_len - len); 3870 } 3871 } 3872 3873 QLNX_INC_IPACKETS(ifp); 3874 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 3875 3876 QL_DPRINT7(ha, (dev, "%s[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n " 3877 "m_len = 0x%x m_pkthdr_len = 0x%x\n", 3878 __func__, fp->rss_id, mp->m_pkthdr.csum_data, 3879 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len)); 3880 3881 (*ifp->if_input)(ifp, mp); 3882 3883 rxq->tpa_info[agg_index].mpf = NULL; 3884 rxq->tpa_info[agg_index].mpl = NULL; 3885 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3886 3887 return (cqe->num_of_coalesced_segs); 3888 } 3889 3890 static int 3891 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 3892 int lro_enable) 3893 { 3894 uint16_t hw_comp_cons, sw_comp_cons; 3895 int rx_pkt = 0; 3896 struct qlnx_rx_queue *rxq = fp->rxq; 3897 struct ifnet *ifp = ha->ifp; 3898 struct ecore_dev *cdev = &ha->cdev; 3899 struct ecore_hwfn *p_hwfn; 3900 3901 #ifdef QLNX_SOFT_LRO 3902 struct lro_ctrl *lro; 3903 3904 lro = &rxq->lro; 3905 #endif /* #ifdef QLNX_SOFT_LRO */ 3906 3907 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 3908 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 3909 3910 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 3911 3912 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 3913 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 3914 * read before it is written by FW, then FW writes CQE and SB, and then 3915 * the CPU reads the hw_comp_cons, it will use an old CQE. 
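	 * Ordering matters only between hw_comp_cons and the CQE data it
	 * covers; sw_comp_cons is driver-private chain state and needs no
	 * barrier of its own.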
3916 */ 3917 3918 /* Loop to complete all indicated BDs */ 3919 while (sw_comp_cons != hw_comp_cons) { 3920 union eth_rx_cqe *cqe; 3921 struct eth_fast_path_rx_reg_cqe *fp_cqe; 3922 struct sw_rx_data *sw_rx_data; 3923 register struct mbuf *mp; 3924 enum eth_rx_cqe_type cqe_type; 3925 uint16_t len, pad, len_on_first_bd; 3926 uint8_t *data; 3927 #if __FreeBSD_version >= 1100000 3928 uint8_t hash_type; 3929 #endif /* #if __FreeBSD_version >= 1100000 */ 3930 3931 /* Get the CQE from the completion ring */ 3932 cqe = (union eth_rx_cqe *) 3933 ecore_chain_consume(&rxq->rx_comp_ring); 3934 cqe_type = cqe->fast_path_regular.type; 3935 3936 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 3937 QL_DPRINT3(ha, (ha->pci_dev, "Got a slowath CQE\n")); 3938 3939 ecore_eth_cqe_completion(p_hwfn, 3940 (struct eth_slow_path_rx_cqe *)cqe); 3941 goto next_cqe; 3942 } 3943 3944 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 3945 3946 switch (cqe_type) { 3947 3948 case ETH_RX_CQE_TYPE_TPA_START: 3949 qlnx_tpa_start(ha, fp, rxq, 3950 &cqe->fast_path_tpa_start); 3951 fp->tpa_start++; 3952 break; 3953 3954 case ETH_RX_CQE_TYPE_TPA_CONT: 3955 qlnx_tpa_cont(ha, fp, rxq, 3956 &cqe->fast_path_tpa_cont); 3957 fp->tpa_cont++; 3958 break; 3959 3960 case ETH_RX_CQE_TYPE_TPA_END: 3961 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 3962 &cqe->fast_path_tpa_end); 3963 fp->tpa_end++; 3964 break; 3965 3966 default: 3967 break; 3968 } 3969 3970 goto next_cqe; 3971 } 3972 3973 /* Get the data from the SW ring */ 3974 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3975 mp = sw_rx_data->data; 3976 3977 if (mp == NULL) { 3978 QL_DPRINT1(ha, (ha->pci_dev, "%s: mp = NULL\n", 3979 __func__)); 3980 fp->err_rx_mp_null++; 3981 rxq->sw_rx_cons = 3982 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3983 goto next_cqe; 3984 } 3985 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3986 BUS_DMASYNC_POSTREAD); 3987 3988 /* non GRO */ 3989 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 3990 len = le16toh(fp_cqe->pkt_len); 3991 pad = fp_cqe->placement_offset; 3992 3993 QL_DPRINT3(ha, 3994 (ha->pci_dev, "CQE type = %x, flags = %x, vlan = %x," 3995 " len %u, parsing flags = %d pad = %d\n", 3996 cqe_type, fp_cqe->bitfields, 3997 le16toh(fp_cqe->vlan_tag), 3998 len, le16toh(fp_cqe->pars_flags.flags), pad)); 3999 4000 data = mtod(mp, uint8_t *); 4001 data = data + pad; 4002 4003 if (0) 4004 qlnx_dump_buf8(ha, __func__, data, len); 4005 4006 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4007 * is always with a fixed size. If allocation fails, we take the 4008 * consumed BD and return it to the ring in the PROD position. 4009 * The packet that was received on that BD will be dropped (and 4010 * not passed to the upper stack). 
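	 * qlnx_reuse_rx_data() below is what hands the consumed buffer back
	 * to the ring at the producer position in that case.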
4011 */ 4012 /* If this is an error packet then drop it */ 4013 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4014 CQE_FLAGS_ERR) { 4015 4016 QL_DPRINT1(ha, (ha->pci_dev, 4017 "CQE in CONS = %u has error, flags = %x," 4018 " dropping incoming packet\n", sw_comp_cons, 4019 le16toh(cqe->fast_path_regular.pars_flags.flags))); 4020 4021 fp->err_rx_hw_errors++; 4022 4023 qlnx_reuse_rx_data(rxq); 4024 4025 QLNX_INC_IERRORS(ifp); 4026 4027 goto next_cqe; 4028 } 4029 4030 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4031 4032 QL_DPRINT1(ha, (ha->pci_dev, 4033 "New buffer allocation failed, dropping" 4034 " incoming packet and reusing its buffer\n")); 4035 4036 qlnx_reuse_rx_data(rxq); 4037 4038 fp->err_rx_alloc_errors++; 4039 4040 QLNX_INC_IQDROPS(ifp); 4041 4042 goto next_cqe; 4043 } 4044 4045 ecore_chain_consume(&rxq->rx_bd_ring); 4046 4047 len_on_first_bd = fp_cqe->len_on_first_bd; 4048 m_adj(mp, pad); 4049 mp->m_pkthdr.len = len; 4050 4051 QL_DPRINT1(ha, 4052 (ha->pci_dev, "%s: len = %d len_on_first_bd = %d\n", 4053 __func__, len, len_on_first_bd)); 4054 4055 if ((len > 60 ) && (len > len_on_first_bd)) { 4056 4057 mp->m_len = len_on_first_bd; 4058 4059 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4060 (len - len_on_first_bd)) != 0) { 4061 4062 m_freem(mp); 4063 4064 QLNX_INC_IQDROPS(ifp); 4065 4066 goto next_cqe; 4067 } 4068 4069 } else if (len_on_first_bd < len) { 4070 fp->err_rx_jumbo_chain_pkts++; 4071 } else { 4072 mp->m_len = len; 4073 } 4074 4075 mp->m_flags |= M_PKTHDR; 4076 4077 /* assign packet to this interface interface */ 4078 mp->m_pkthdr.rcvif = ifp; 4079 4080 /* assume no hardware checksum has complated */ 4081 mp->m_pkthdr.csum_flags = 0; 4082 4083 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4084 4085 #if __FreeBSD_version >= 1100000 4086 4087 hash_type = fp_cqe->bitfields & 4088 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4089 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4090 4091 switch (hash_type) { 4092 4093 case RSS_HASH_TYPE_IPV4: 4094 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4095 break; 4096 4097 case RSS_HASH_TYPE_TCP_IPV4: 4098 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4099 break; 4100 4101 case RSS_HASH_TYPE_IPV6: 4102 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4103 break; 4104 4105 case RSS_HASH_TYPE_TCP_IPV6: 4106 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4107 break; 4108 4109 default: 4110 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4111 break; 4112 } 4113 4114 #else 4115 mp->m_flags |= M_FLOWID; 4116 #endif 4117 4118 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4119 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4120 } 4121 4122 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4123 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4124 } 4125 4126 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4127 mp->m_pkthdr.csum_data = 0xFFFF; 4128 mp->m_pkthdr.csum_flags |= 4129 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4130 } 4131 4132 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4133 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4134 mp->m_flags |= M_VLANTAG; 4135 } 4136 4137 QLNX_INC_IPACKETS(ifp); 4138 QLNX_INC_IBYTES(ifp, len); 4139 4140 #ifdef QLNX_SOFT_LRO 4141 4142 if (lro_enable) { 4143 4144 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4145 4146 tcp_lro_queue_mbuf(lro, mp); 4147 4148 #else 4149 4150 if (tcp_lro_rx(lro, mp, 0)) 4151 (*ifp->if_input)(ifp, mp); 4152 4153 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4154 4155 } else { 4156 (*ifp->if_input)(ifp, mp); 4157 } 4158 #else 4159 4160 (*ifp->if_input)(ifp, mp); 4161 
4162 #endif /* #ifdef QLNX_SOFT_LRO */ 4163 4164 rx_pkt++; 4165 4166 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4167 4168 next_cqe: /* don't consume bd rx buffer */ 4169 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4170 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4171 4172 /* CR TPA - revisit how to handle budget in TPA perhaps 4173 increase on "end" */ 4174 if (rx_pkt == budget) 4175 break; 4176 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4177 4178 /* Update producers */ 4179 qlnx_update_rx_prod(p_hwfn, rxq); 4180 4181 return rx_pkt; 4182 } 4183 4184 /* 4185 * fast path interrupt 4186 */ 4187 4188 static void 4189 qlnx_fp_isr(void *arg) 4190 { 4191 qlnx_ivec_t *ivec = arg; 4192 qlnx_host_t *ha; 4193 struct qlnx_fastpath *fp = NULL; 4194 int idx, lro_enable, tc; 4195 int rx_int = 0, total_rx_count = 0; 4196 4197 ha = ivec->ha; 4198 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 4199 4200 if (ha->state != QLNX_STATE_OPEN) { 4201 return; 4202 } 4203 4204 idx = ivec->rss_idx; 4205 4206 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4207 QL_DPRINT1(ha, (ha->pci_dev, "%s: illegal interrupt[%d]\n", 4208 __func__, idx)); 4209 ha->err_illegal_intr++; 4210 return; 4211 } 4212 fp = &ha->fp_array[idx]; 4213 4214 if (fp == NULL) { 4215 QL_DPRINT1(ha, (ha->pci_dev, "%s: fp_array[%d] NULL\n", 4216 __func__, idx)); 4217 ha->err_fp_null++; 4218 } else { 4219 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4220 4221 do { 4222 for (tc = 0; tc < ha->num_tc; tc++) { 4223 if (mtx_trylock(&fp->tx_mtx)) { 4224 qlnx_tx_int(ha, fp, fp->txq[tc]); 4225 mtx_unlock(&fp->tx_mtx); 4226 } 4227 } 4228 4229 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4230 lro_enable); 4231 4232 if (rx_int) { 4233 fp->rx_pkts += rx_int; 4234 total_rx_count += rx_int; 4235 } 4236 4237 } while (rx_int); 4238 4239 4240 #ifdef QLNX_SOFT_LRO 4241 { 4242 struct lro_ctrl *lro; 4243 4244 lro = &fp->rxq->lro; 4245 4246 if (lro_enable && total_rx_count) { 4247 4248 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4249 4250 #ifdef QLNX_TRACE_LRO_CNT 4251 if (lro->lro_mbuf_count & ~1023) 4252 fp->lro_cnt_1024++; 4253 else if (lro->lro_mbuf_count & ~511) 4254 fp->lro_cnt_512++; 4255 else if (lro->lro_mbuf_count & ~255) 4256 fp->lro_cnt_256++; 4257 else if (lro->lro_mbuf_count & ~127) 4258 fp->lro_cnt_128++; 4259 else if (lro->lro_mbuf_count & ~63) 4260 fp->lro_cnt_64++; 4261 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4262 4263 tcp_lro_flush_all(lro); 4264 4265 #else 4266 struct lro_entry *queued; 4267 4268 while ((!SLIST_EMPTY(&lro->lro_active))) { 4269 queued = SLIST_FIRST(&lro->lro_active); 4270 SLIST_REMOVE_HEAD(&lro->lro_active, \ 4271 next); 4272 tcp_lro_flush(lro, queued); 4273 } 4274 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4275 } 4276 } 4277 #endif /* #ifdef QLNX_SOFT_LRO */ 4278 4279 if (fp->fp_taskqueue != NULL) 4280 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 4281 4282 ecore_sb_update_sb_idx(fp->sb_info); 4283 rmb(); 4284 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4285 4286 return; 4287 } 4288 4289 return; 4290 } 4291 4292 4293 /* 4294 * slow path interrupt processing function 4295 * can be invoked in polled mode or in interrupt mode via taskqueue. 
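 * the argument is the struct ecore_hwfn whose slowpath status block fired.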
4296 */ 4297 void 4298 qlnx_sp_isr(void *arg) 4299 { 4300 struct ecore_hwfn *p_hwfn; 4301 qlnx_host_t *ha; 4302 4303 p_hwfn = arg; 4304 4305 ha = (qlnx_host_t *)p_hwfn->p_dev; 4306 4307 ha->sp_interrupts++; 4308 4309 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 4310 4311 ecore_int_sp_dpc(p_hwfn); 4312 4313 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 4314 4315 return; 4316 } 4317 4318 /***************************************************************************** 4319 * Support Functions for DMA'able Memory 4320 *****************************************************************************/ 4321 4322 static void 4323 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4324 { 4325 *((bus_addr_t *)arg) = 0; 4326 4327 if (error) { 4328 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4329 return; 4330 } 4331 4332 *((bus_addr_t *)arg) = segs[0].ds_addr; 4333 4334 return; 4335 } 4336 4337 static int 4338 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4339 { 4340 int ret = 0; 4341 device_t dev; 4342 bus_addr_t b_addr; 4343 4344 dev = ha->pci_dev; 4345 4346 ret = bus_dma_tag_create( 4347 ha->parent_tag,/* parent */ 4348 dma_buf->alignment, 4349 ((bus_size_t)(1ULL << 32)),/* boundary */ 4350 BUS_SPACE_MAXADDR, /* lowaddr */ 4351 BUS_SPACE_MAXADDR, /* highaddr */ 4352 NULL, NULL, /* filter, filterarg */ 4353 dma_buf->size, /* maxsize */ 4354 1, /* nsegments */ 4355 dma_buf->size, /* maxsegsize */ 4356 0, /* flags */ 4357 NULL, NULL, /* lockfunc, lockarg */ 4358 &dma_buf->dma_tag); 4359 4360 if (ret) { 4361 QL_DPRINT1(ha, 4362 (dev, "%s: could not create dma tag\n", __func__)); 4363 goto qlnx_alloc_dmabuf_exit; 4364 } 4365 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4366 (void **)&dma_buf->dma_b, 4367 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4368 &dma_buf->dma_map); 4369 if (ret) { 4370 bus_dma_tag_destroy(dma_buf->dma_tag); 4371 QL_DPRINT1(ha, 4372 (dev, "%s: bus_dmamem_alloc failed\n", __func__)); 4373 goto qlnx_alloc_dmabuf_exit; 4374 } 4375 4376 ret = bus_dmamap_load(dma_buf->dma_tag, 4377 dma_buf->dma_map, 4378 dma_buf->dma_b, 4379 dma_buf->size, 4380 qlnx_dmamap_callback, 4381 &b_addr, BUS_DMA_NOWAIT); 4382 4383 if (ret || !b_addr) { 4384 bus_dma_tag_destroy(dma_buf->dma_tag); 4385 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4386 dma_buf->dma_map); 4387 ret = -1; 4388 goto qlnx_alloc_dmabuf_exit; 4389 } 4390 4391 dma_buf->dma_addr = b_addr; 4392 4393 qlnx_alloc_dmabuf_exit: 4394 4395 return ret; 4396 } 4397 4398 static void 4399 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4400 { 4401 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 4402 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 4403 bus_dma_tag_destroy(dma_buf->dma_tag); 4404 return; 4405 } 4406 4407 void * 4408 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 4409 { 4410 qlnx_dma_t dma_buf; 4411 qlnx_dma_t *dma_p; 4412 qlnx_host_t *ha; 4413 device_t dev; 4414 4415 ha = (qlnx_host_t *)ecore_dev; 4416 dev = ha->pci_dev; 4417 4418 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4419 4420 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 4421 4422 dma_buf.size = size + PAGE_SIZE; 4423 dma_buf.alignment = 8; 4424 4425 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 4426 return (NULL); 4427 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 4428 4429 *phys = dma_buf.dma_addr; 4430 4431 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 4432 4433 memcpy(dma_p, &dma_buf, 
sizeof(qlnx_dma_t)); 4434 4435 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__, 4436 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 4437 dma_buf.dma_b, (void *)dma_buf.dma_addr, size)); 4438 4439 return (dma_buf.dma_b); 4440 } 4441 4442 void 4443 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 4444 uint32_t size) 4445 { 4446 qlnx_dma_t dma_buf, *dma_p; 4447 qlnx_host_t *ha; 4448 device_t dev; 4449 4450 ha = (qlnx_host_t *)ecore_dev; 4451 dev = ha->pci_dev; 4452 4453 if (v_addr == NULL) 4454 return; 4455 4456 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4457 4458 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 4459 4460 QL_DPRINT5(ha, (dev, "%s: [%p %p %p %p 0x%08x ]\n", __func__, 4461 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 4462 dma_p->dma_b, (void *)dma_p->dma_addr, size)); 4463 4464 dma_buf = *dma_p; 4465 4466 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 4467 return; 4468 } 4469 4470 static int 4471 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 4472 { 4473 int ret; 4474 device_t dev; 4475 4476 dev = ha->pci_dev; 4477 4478 /* 4479 * Allocate parent DMA Tag 4480 */ 4481 ret = bus_dma_tag_create( 4482 bus_get_dma_tag(dev), /* parent */ 4483 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 4484 BUS_SPACE_MAXADDR, /* lowaddr */ 4485 BUS_SPACE_MAXADDR, /* highaddr */ 4486 NULL, NULL, /* filter, filterarg */ 4487 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 4488 0, /* nsegments */ 4489 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 4490 0, /* flags */ 4491 NULL, NULL, /* lockfunc, lockarg */ 4492 &ha->parent_tag); 4493 4494 if (ret) { 4495 QL_DPRINT1(ha, (dev, "%s: could not create parent dma tag\n", 4496 __func__)); 4497 return (-1); 4498 } 4499 4500 ha->flags.parent_tag = 1; 4501 4502 return (0); 4503 } 4504 4505 static void 4506 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 4507 { 4508 if (ha->parent_tag != NULL) { 4509 bus_dma_tag_destroy(ha->parent_tag); 4510 ha->parent_tag = NULL; 4511 } 4512 return; 4513 } 4514 4515 static int 4516 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 4517 { 4518 if (bus_dma_tag_create(NULL, /* parent */ 4519 1, 0, /* alignment, bounds */ 4520 BUS_SPACE_MAXADDR, /* lowaddr */ 4521 BUS_SPACE_MAXADDR, /* highaddr */ 4522 NULL, NULL, /* filter, filterarg */ 4523 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 4524 QLNX_MAX_SEGMENTS, /* nsegments */ 4525 (PAGE_SIZE * 4), /* maxsegsize */ 4526 BUS_DMA_ALLOCNOW, /* flags */ 4527 NULL, /* lockfunc */ 4528 NULL, /* lockfuncarg */ 4529 &ha->tx_tag)) { 4530 4531 QL_DPRINT1(ha, (ha->pci_dev, "%s: tx_tag alloc failed\n", 4532 __func__)); 4533 return (-1); 4534 } 4535 4536 return (0); 4537 } 4538 4539 static void 4540 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 4541 { 4542 if (ha->tx_tag != NULL) { 4543 bus_dma_tag_destroy(ha->tx_tag); 4544 ha->tx_tag = NULL; 4545 } 4546 return; 4547 } 4548 4549 static int 4550 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 4551 { 4552 if (bus_dma_tag_create(NULL, /* parent */ 4553 1, 0, /* alignment, bounds */ 4554 BUS_SPACE_MAXADDR, /* lowaddr */ 4555 BUS_SPACE_MAXADDR, /* highaddr */ 4556 NULL, NULL, /* filter, filterarg */ 4557 MJUM9BYTES, /* maxsize */ 4558 1, /* nsegments */ 4559 MJUM9BYTES, /* maxsegsize */ 4560 BUS_DMA_ALLOCNOW, /* flags */ 4561 NULL, /* lockfunc */ 4562 NULL, /* lockfuncarg */ 4563 &ha->rx_tag)) { 4564 4565 QL_DPRINT1(ha, (ha->pci_dev, "%s: rx_tag alloc failed\n", 4566 __func__)); 4567 4568 return (-1); 4569 } 4570 return (0); 4571 } 4572 4573 static void 4574 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 4575 { 4576 if (ha->rx_tag != NULL) { 4577 
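		/*
		 * Only the tag itself is destroyed here; the per-buffer dmamaps
		 * are unloaded and destroyed in qlnx_free_rx_buffers().
		 */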
		bus_dma_tag_destroy(ha->rx_tag);
		ha->rx_tag = NULL;
	}
	return;
}

/*********************************
 * Exported functions
 *********************************/
uint32_t
qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id)
{
	uint32_t bar_size;

	bar_id = bar_id * 2;

	bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev,
				SYS_RES_MEMORY,
				PCIR_BAR(bar_id));

	return (bar_size);
}

uint32_t
qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value)
{
	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, 1);
	return 0;
}

uint32_t
qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg,
	uint16_t *reg_value)
{
	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, 2);
	return 0;
}

uint32_t
qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg,
	uint32_t *reg_value)
{
	*reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, 4);
	return 0;
}

void
qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value)
{
	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, reg_value, 1);
	return;
}

void
qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg,
	uint16_t reg_value)
{
	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, reg_value, 2);
	return;
}

void
qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg,
	uint32_t reg_value)
{
	pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev,
		pci_reg, reg_value, 4);
	return;
}


int
qlnx_pci_find_capability(void *ecore_dev, int cap)
{
	int reg;

	if (pci_find_cap(((qlnx_host_t *)ecore_dev)->pci_dev, PCIY_EXPRESS,
		&reg) == 0)
		return reg;
	else {
		QL_DPRINT1(((qlnx_host_t *)ecore_dev),
			(((qlnx_host_t *)ecore_dev)->pci_dev,
			"%s: failed\n", __func__));
		return 0;
	}
}

uint32_t
qlnx_reg_rd32(void *hwfn, uint32_t reg_addr)
{
	uint32_t data32;
	struct ecore_dev *cdev;
	struct ecore_hwfn *p_hwfn;

	p_hwfn = hwfn;

	cdev = p_hwfn->p_dev;

	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
			(uint8_t *)(cdev->regview)) + reg_addr;

	data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr);

	return (data32);
}

void
qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
{
	struct ecore_dev *cdev;
	struct ecore_hwfn *p_hwfn;

	p_hwfn = hwfn;

	cdev = p_hwfn->p_dev;

	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
			(uint8_t *)(cdev->regview)) + reg_addr;

	bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);

	return;
}

void
qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value)
{
	struct ecore_dev *cdev;
	struct ecore_hwfn *p_hwfn;

	p_hwfn = hwfn;

	cdev = p_hwfn->p_dev;

	reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) -
			(uint8_t *)(cdev->regview)) + reg_addr;

	bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value);

	return;
}

void
qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value)
{
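	/*
	 * Doorbell writes go through the doorbell BAR (pci_dbells); the offset
	 * arithmetic mirrors qlnx_reg_wr32() but is relative to cdev->doorbells.
	 */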
struct ecore_dev *cdev; 4728 struct ecore_hwfn *p_hwfn; 4729 4730 p_hwfn = hwfn; 4731 4732 cdev = p_hwfn->p_dev; 4733 4734 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) - 4735 (uint8_t *)(cdev->doorbells)) + reg_addr; 4736 4737 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value); 4738 4739 return; 4740 } 4741 4742 uint32_t 4743 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 4744 { 4745 uint32_t data32; 4746 uint32_t offset; 4747 struct ecore_dev *cdev; 4748 4749 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4750 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4751 4752 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 4753 4754 return (data32); 4755 } 4756 4757 void 4758 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 4759 { 4760 uint32_t offset; 4761 struct ecore_dev *cdev; 4762 4763 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4764 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4765 4766 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 4767 4768 return; 4769 } 4770 4771 void * 4772 qlnx_zalloc(uint32_t size) 4773 { 4774 caddr_t va; 4775 4776 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 4777 bzero(va, size); 4778 return ((void *)va); 4779 } 4780 4781 void 4782 qlnx_barrier(void *p_hwfn) 4783 { 4784 qlnx_host_t *ha; 4785 4786 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4787 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 4788 } 4789 4790 void 4791 qlnx_link_update(void *p_hwfn) 4792 { 4793 qlnx_host_t *ha; 4794 int prev_link_state; 4795 4796 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4797 4798 qlnx_fill_link(p_hwfn, &ha->if_link); 4799 4800 prev_link_state = ha->link_up; 4801 ha->link_up = ha->if_link.link_up; 4802 4803 if (prev_link_state != ha->link_up) { 4804 if (ha->link_up) { 4805 if_link_state_change(ha->ifp, LINK_STATE_UP); 4806 } else { 4807 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 4808 } 4809 } 4810 return; 4811 } 4812 4813 void 4814 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link) 4815 { 4816 struct ecore_mcp_link_params link_params; 4817 struct ecore_mcp_link_state link_state; 4818 4819 memset(if_link, 0, sizeof(*if_link)); 4820 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 4821 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 4822 4823 /* Prepare source inputs */ 4824 /* we only deal with physical functions */ 4825 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 4826 sizeof(link_params)); 4827 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 4828 sizeof(link_state)); 4829 4830 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type); 4831 4832 /* Set the link parameters to pass to protocol driver */ 4833 if (link_state.link_up) { 4834 if_link->link_up = true; 4835 if_link->speed = link_state.speed; 4836 } 4837 4838 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 4839 4840 if (link_params.speed.autoneg) 4841 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 4842 4843 if (link_params.pause.autoneg || 4844 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 4845 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 4846 4847 if (link_params.pause.autoneg || link_params.pause.forced_rx || 4848 link_params.pause.forced_tx) 4849 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 4850 4851 if (link_params.speed.advertised_speeds & 4852 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 4853 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 4854 
				QLNX_LINK_CAP_1000baseT_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_LINK_SPEED_40G)
		if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_params.speed.advertised_speeds &
		NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if_link->advertised_caps = if_link->supported_caps;

	if_link->autoneg = link_params.speed.autoneg;
	if_link->duplex = QLNX_LINK_DUPLEX;

	/* Link partner capabilities */

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD)
		if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full;

	if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G)
		if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full;

	if (link_state.an_complete)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg;

	if (link_state.partner_adv_pause)
		if_link->link_partner_caps |= QLNX_LINK_CAP_Pause;

	if ((link_state.partner_adv_pause ==
		ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) ||
		(link_state.partner_adv_pause ==
			ECORE_LINK_PARTNER_BOTH_PAUSE))
		if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause;

	return;
}

static int
qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params)
{
	int rc, i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct ecore_hwfn *p_hwfn = &cdev->hwfns[i];
		p_hwfn->pf_params = *func_params;
	}

	rc = ecore_resc_alloc(cdev);
	if (rc)
		goto qlnx_nic_setup_exit;

	ecore_resc_setup(cdev);

qlnx_nic_setup_exit:

	return rc;
}

static int
qlnx_nic_start(struct ecore_dev *cdev)
{
	int rc;
	struct ecore_hw_init_params params;

	bzero(&params, sizeof (struct ecore_hw_init_params));

	params.p_tunn = NULL;
	params.b_hw_start = true;
	params.int_mode = cdev->int_mode;
	params.allow_npar_tx_switch = true;
	params.bin_fw_data = NULL;

	rc = ecore_hw_init(cdev, &params);
	if (rc) {
		ecore_resc_free(cdev);
		return rc;
	}

	return 0;
}

static int
qlnx_slowpath_start(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
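	/*
	 * num_cons below is sized as num_rss * (num_tc + 1): enough L2
	 * connections for one Tx queue per traffic class plus the Rx queue
	 * of every RSS queue.
	 */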
	struct ecore_pf_params pf_params;
	int rc;

	memset(&pf_params, 0, sizeof(struct ecore_pf_params));
	pf_params.eth_pf_params.num_cons =
		(ha->num_rss) * (ha->num_tc + 1);

	cdev = &ha->cdev;

	rc = qlnx_nic_setup(cdev, &pf_params);
	if (rc)
		goto qlnx_slowpath_start_exit;

	cdev->int_mode = ECORE_INT_MODE_MSIX;
	cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE;

#ifdef QLNX_MAX_COALESCE
	cdev->rx_coalesce_usecs = 255;
	cdev->tx_coalesce_usecs = 255;
#endif

	rc = qlnx_nic_start(cdev);

	ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs;
	ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs;

qlnx_slowpath_start_exit:

	return (rc);
}

static int
qlnx_slowpath_stop(qlnx_host_t *ha)
{
	struct ecore_dev *cdev;
	device_t dev = ha->pci_dev;
	int i;

	cdev = &ha->cdev;

	ecore_hw_stop(cdev);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		ha->sp_handle[i] = NULL;

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
		ha->sp_irq[i] = NULL;
	}

	ecore_resc_free(cdev);

	return 0;
}

static void
qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
	char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);

	for_each_hwfn(cdev, i) {
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
	}

	cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD;

	return ;
}

void
qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats)
{
	enum ecore_mcp_protocol_type type;
	union ecore_mcp_protocol_stats *stats;
	struct ecore_eth_stats eth_stats;
	device_t dev;

	dev = ((qlnx_host_t *)cdev)->pci_dev;
	stats = proto_stats;
	type = proto_type;

	switch (type) {
	case ECORE_MCP_LAN_STATS:
		ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;

	default:
		((qlnx_host_t *)cdev)->err_get_proto_invalid_type++;

		QL_DPRINT1(((qlnx_host_t *)cdev),
			(dev, "%s: invalid protocol type 0x%x\n", __func__,
			type));
		break;
	}
	return;
}

static int
qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver)
{
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (p_ptt == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_ptt_acquire failed\n", __func__));
		return (-1);
	}
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL);

	ecore_ptt_release(p_hwfn, p_ptt);

	return (0);
}

static int
qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size)
{
	struct ecore_hwfn *p_hwfn;
	struct ecore_ptt *p_ptt;

	p_hwfn = &ha->cdev.hwfns[0];
	p_ptt = ecore_ptt_acquire(p_hwfn);

	if (p_ptt == NULL) {
		QL_DPRINT1(ha, (ha->pci_dev,
			"%s : ecore_ptt_acquire failed\n", __func__));
		return (-1);
	}
	ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size);

ecore_ptt_release(p_hwfn, p_ptt); 5114 5115 return (0); 5116 } 5117 5118 static int 5119 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5120 { 5121 struct ecore_dev *cdev; 5122 5123 cdev = &ha->cdev; 5124 5125 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5126 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5127 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5128 5129 return 0; 5130 } 5131 5132 static void 5133 qlnx_init_fp(qlnx_host_t *ha) 5134 { 5135 int rss_id, txq_array_index, tc; 5136 5137 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5138 5139 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5140 5141 fp->rss_id = rss_id; 5142 fp->edev = ha; 5143 fp->sb_info = &ha->sb_array[rss_id]; 5144 fp->rxq = &ha->rxq_array[rss_id]; 5145 fp->rxq->rxq_id = rss_id; 5146 5147 for (tc = 0; tc < ha->num_tc; tc++) { 5148 txq_array_index = tc * ha->num_rss + rss_id; 5149 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5150 fp->txq[tc]->index = txq_array_index; 5151 } 5152 5153 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5154 rss_id); 5155 5156 /* reset all the statistics counters */ 5157 5158 fp->tx_pkts_processed = 0; 5159 fp->tx_pkts_freed = 0; 5160 fp->tx_pkts_transmitted = 0; 5161 fp->tx_pkts_completed = 0; 5162 fp->tx_lso_wnd_min_len = 0; 5163 fp->tx_defrag = 0; 5164 fp->tx_nsegs_gt_elem_left = 0; 5165 fp->tx_tso_max_nsegs = 0; 5166 fp->tx_tso_min_nsegs = 0; 5167 fp->err_tx_nsegs_gt_elem_left = 0; 5168 fp->err_tx_dmamap_create = 0; 5169 fp->err_tx_defrag_dmamap_load = 0; 5170 fp->err_tx_non_tso_max_seg = 0; 5171 fp->err_tx_dmamap_load = 0; 5172 fp->err_tx_defrag = 0; 5173 fp->err_tx_free_pkt_null = 0; 5174 fp->err_tx_cons_idx_conflict = 0; 5175 5176 fp->rx_pkts = 0; 5177 fp->err_m_getcl = 0; 5178 fp->err_m_getjcl = 0; 5179 } 5180 return; 5181 } 5182 5183 static void 5184 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5185 { 5186 struct ecore_dev *cdev; 5187 5188 cdev = &ha->cdev; 5189 5190 if (sb_info->sb_virt) { 5191 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5192 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5193 sb_info->sb_virt = NULL; 5194 } 5195 } 5196 5197 static int 5198 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5199 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5200 { 5201 struct ecore_hwfn *p_hwfn; 5202 int hwfn_index, rc; 5203 u16 rel_sb_id; 5204 5205 hwfn_index = sb_id % cdev->num_hwfns; 5206 p_hwfn = &cdev->hwfns[hwfn_index]; 5207 rel_sb_id = sb_id / cdev->num_hwfns; 5208 5209 QL_DPRINT2(((qlnx_host_t *)cdev), (((qlnx_host_t *)cdev)->pci_dev, 5210 "%s: hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x " 5211 "sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5212 __func__, hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5213 sb_virt_addr, (void *)sb_phy_addr)); 5214 5215 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5216 sb_virt_addr, sb_phy_addr, rel_sb_id); 5217 5218 return rc; 5219 } 5220 5221 /* This function allocates fast-path status block memory */ 5222 static int 5223 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5224 { 5225 struct status_block *sb_virt; 5226 bus_addr_t sb_phys; 5227 int rc; 5228 uint32_t size; 5229 struct ecore_dev *cdev; 5230 5231 cdev = &ha->cdev; 5232 5233 size = sizeof(*sb_virt); 5234 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5235 5236 if (!sb_virt) { 5237 QL_DPRINT1(ha, (ha->pci_dev, 5238 "%s: Status block allocation 
failed\n", __func__)); 5239 return -ENOMEM; 5240 } 5241 5242 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5243 if (rc) { 5244 QL_DPRINT1(ha, (ha->pci_dev, "%s: failed\n", __func__)); 5245 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5246 } 5247 5248 return rc; 5249 } 5250 5251 static void 5252 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5253 { 5254 int i; 5255 struct sw_rx_data *rx_buf; 5256 5257 for (i = 0; i < rxq->num_rx_buffers; i++) { 5258 5259 rx_buf = &rxq->sw_rx_ring[i]; 5260 5261 if (rx_buf->data != NULL) { 5262 if (rx_buf->map != NULL) { 5263 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5264 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5265 rx_buf->map = NULL; 5266 } 5267 m_freem(rx_buf->data); 5268 rx_buf->data = NULL; 5269 } 5270 } 5271 return; 5272 } 5273 5274 static void 5275 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5276 { 5277 struct ecore_dev *cdev; 5278 int i; 5279 5280 cdev = &ha->cdev; 5281 5282 qlnx_free_rx_buffers(ha, rxq); 5283 5284 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5285 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5286 if (rxq->tpa_info[i].mpf != NULL) 5287 m_freem(rxq->tpa_info[i].mpf); 5288 } 5289 5290 bzero((void *)&rxq->sw_rx_ring[0], 5291 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5292 5293 /* Free the real RQ ring used by FW */ 5294 if (rxq->rx_bd_ring.p_virt_addr) { 5295 ecore_chain_free(cdev, &rxq->rx_bd_ring); 5296 rxq->rx_bd_ring.p_virt_addr = NULL; 5297 } 5298 5299 /* Free the real completion ring used by FW */ 5300 if (rxq->rx_comp_ring.p_virt_addr && 5301 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 5302 ecore_chain_free(cdev, &rxq->rx_comp_ring); 5303 rxq->rx_comp_ring.p_virt_addr = NULL; 5304 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 5305 } 5306 5307 #ifdef QLNX_SOFT_LRO 5308 { 5309 struct lro_ctrl *lro; 5310 5311 lro = &rxq->lro; 5312 tcp_lro_free(lro); 5313 } 5314 #endif /* #ifdef QLNX_SOFT_LRO */ 5315 5316 return; 5317 } 5318 5319 static int 5320 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5321 { 5322 register struct mbuf *mp; 5323 uint16_t rx_buf_size; 5324 struct sw_rx_data *sw_rx_data; 5325 struct eth_rx_bd *rx_bd; 5326 dma_addr_t dma_addr; 5327 bus_dmamap_t map; 5328 bus_dma_segment_t segs[1]; 5329 int nsegs; 5330 int ret; 5331 struct ecore_dev *cdev; 5332 5333 cdev = &ha->cdev; 5334 5335 rx_buf_size = rxq->rx_buf_size; 5336 5337 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5338 5339 if (mp == NULL) { 5340 QL_DPRINT1(ha, (ha->pci_dev, 5341 "%s : Failed to allocate Rx data\n", __func__)); 5342 return -ENOMEM; 5343 } 5344 5345 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5346 5347 map = (bus_dmamap_t)0; 5348 5349 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5350 BUS_DMA_NOWAIT); 5351 dma_addr = segs[0].ds_addr; 5352 5353 if (ret || !dma_addr || (nsegs != 1)) { 5354 m_freem(mp); 5355 QL_DPRINT1(ha, (ha->pci_dev, 5356 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5357 __func__, ret, (long long unsigned int)dma_addr, 5358 nsegs)); 5359 return -ENOMEM; 5360 } 5361 5362 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5363 sw_rx_data->data = mp; 5364 sw_rx_data->dma_addr = dma_addr; 5365 sw_rx_data->map = map; 5366 5367 /* Advance PROD and get BD pointer */ 5368 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 5369 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 5370 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 5371 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5372 5373 rxq->sw_rx_prod = 
(rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5374 5375 return 0; 5376 } 5377 5378 static int 5379 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 5380 struct qlnx_agg_info *tpa) 5381 { 5382 struct mbuf *mp; 5383 dma_addr_t dma_addr; 5384 bus_dmamap_t map; 5385 bus_dma_segment_t segs[1]; 5386 int nsegs; 5387 int ret; 5388 struct sw_rx_data *rx_buf; 5389 5390 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5391 5392 if (mp == NULL) { 5393 QL_DPRINT1(ha, (ha->pci_dev, 5394 "%s : Failed to allocate Rx data\n", __func__)); 5395 return -ENOMEM; 5396 } 5397 5398 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5399 5400 map = (bus_dmamap_t)0; 5401 5402 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5403 BUS_DMA_NOWAIT); 5404 dma_addr = segs[0].ds_addr; 5405 5406 if (ret || !dma_addr || (nsegs != 1)) { 5407 m_freem(mp); 5408 QL_DPRINT1(ha, (ha->pci_dev, 5409 "%s: bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5410 __func__, ret, (long long unsigned int)dma_addr, 5411 nsegs)); 5412 return -ENOMEM; 5413 } 5414 5415 rx_buf = &tpa->rx_buf; 5416 5417 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 5418 5419 rx_buf->data = mp; 5420 rx_buf->dma_addr = dma_addr; 5421 rx_buf->map = map; 5422 5423 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5424 5425 return (0); 5426 } 5427 5428 static void 5429 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 5430 { 5431 struct sw_rx_data *rx_buf; 5432 5433 rx_buf = &tpa->rx_buf; 5434 5435 if (rx_buf->data != NULL) { 5436 if (rx_buf->map != NULL) { 5437 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5438 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5439 rx_buf->map = NULL; 5440 } 5441 m_freem(rx_buf->data); 5442 rx_buf->data = NULL; 5443 } 5444 return; 5445 } 5446 5447 /* This function allocates all memory needed per Rx queue */ 5448 static int 5449 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5450 { 5451 int i, rc, num_allocated; 5452 struct ifnet *ifp; 5453 struct ecore_dev *cdev; 5454 5455 cdev = &ha->cdev; 5456 ifp = ha->ifp; 5457 5458 rxq->num_rx_buffers = RX_RING_SIZE; 5459 5460 rxq->rx_buf_size = ha->rx_buf_size; 5461 5462 /* Allocate the parallel driver ring for Rx buffers */ 5463 bzero((void *)&rxq->sw_rx_ring[0], 5464 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5465 5466 /* Allocate FW Rx ring */ 5467 5468 rc = ecore_chain_alloc(cdev, 5469 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5470 ECORE_CHAIN_MODE_NEXT_PTR, 5471 ECORE_CHAIN_CNT_TYPE_U16, 5472 RX_RING_SIZE, 5473 sizeof(struct eth_rx_bd), 5474 &rxq->rx_bd_ring, NULL); 5475 5476 if (rc) 5477 goto err; 5478 5479 /* Allocate FW completion ring */ 5480 rc = ecore_chain_alloc(cdev, 5481 ECORE_CHAIN_USE_TO_CONSUME, 5482 ECORE_CHAIN_MODE_PBL, 5483 ECORE_CHAIN_CNT_TYPE_U16, 5484 RX_RING_SIZE, 5485 sizeof(union eth_rx_cqe), 5486 &rxq->rx_comp_ring, NULL); 5487 5488 if (rc) 5489 goto err; 5490 5491 /* Allocate buffers for the Rx ring */ 5492 5493 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5494 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 5495 &rxq->tpa_info[i]); 5496 if (rc) 5497 break; 5498 5499 } 5500 5501 for (i = 0; i < rxq->num_rx_buffers; i++) { 5502 rc = qlnx_alloc_rx_buffer(ha, rxq); 5503 if (rc) 5504 break; 5505 } 5506 num_allocated = i; 5507 if (!num_allocated) { 5508 QL_DPRINT1(ha, (ha->pci_dev, 5509 "%s: Rx buffers allocation failed\n", __func__)); 5510 goto err; 5511 } else if (num_allocated < rxq->num_rx_buffers) { 5512 QL_DPRINT1(ha, (ha->pci_dev, 5513 "%s: Allocated less buffers than" 5514 " desired (%d allocated)\n", __func__, 
num_allocated)); 5515 } 5516 5517 #ifdef QLNX_SOFT_LRO 5518 5519 { 5520 struct lro_ctrl *lro; 5521 5522 lro = &rxq->lro; 5523 5524 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5525 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 5526 QL_DPRINT1(ha, (ha->pci_dev, 5527 "%s: tcp_lro_init[%d] failed\n", 5528 __func__, rxq->rxq_id)); 5529 goto err; 5530 } 5531 #else 5532 if (tcp_lro_init(lro)) { 5533 QL_DPRINT1(ha, (ha->pci_dev, 5534 "%s: tcp_lro_init[%d] failed\n", 5535 __func__, rxq->rxq_id)); 5536 goto err; 5537 } 5538 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5539 5540 lro->ifp = ha->ifp; 5541 } 5542 #endif /* #ifdef QLNX_SOFT_LRO */ 5543 return 0; 5544 5545 err: 5546 qlnx_free_mem_rxq(ha, rxq); 5547 return -ENOMEM; 5548 } 5549 5550 5551 static void 5552 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5553 struct qlnx_tx_queue *txq) 5554 { 5555 struct ecore_dev *cdev; 5556 5557 cdev = &ha->cdev; 5558 5559 bzero((void *)&txq->sw_tx_ring[0], 5560 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5561 5562 /* Free the real RQ ring used by FW */ 5563 if (txq->tx_pbl.p_virt_addr) { 5564 ecore_chain_free(cdev, &txq->tx_pbl); 5565 txq->tx_pbl.p_virt_addr = NULL; 5566 } 5567 return; 5568 } 5569 5570 /* This function allocates all memory needed per Tx queue */ 5571 static int 5572 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5573 struct qlnx_tx_queue *txq) 5574 { 5575 int ret = ECORE_SUCCESS; 5576 union eth_tx_bd_types *p_virt; 5577 struct ecore_dev *cdev; 5578 5579 cdev = &ha->cdev; 5580 5581 bzero((void *)&txq->sw_tx_ring[0], 5582 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5583 5584 /* Allocate the real Tx ring to be used by FW */ 5585 ret = ecore_chain_alloc(cdev, 5586 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5587 ECORE_CHAIN_MODE_PBL, 5588 ECORE_CHAIN_CNT_TYPE_U16, 5589 TX_RING_SIZE, 5590 sizeof(*p_virt), 5591 &txq->tx_pbl, NULL); 5592 5593 if (ret != ECORE_SUCCESS) { 5594 goto err; 5595 } 5596 5597 txq->num_tx_buffers = TX_RING_SIZE; 5598 5599 return 0; 5600 5601 err: 5602 qlnx_free_mem_txq(ha, fp, txq); 5603 return -ENOMEM; 5604 } 5605 5606 static void 5607 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5608 { 5609 struct mbuf *mp; 5610 struct ifnet *ifp = ha->ifp; 5611 5612 if (mtx_initialized(&fp->tx_mtx)) { 5613 5614 if (fp->tx_br != NULL) { 5615 5616 mtx_lock(&fp->tx_mtx); 5617 5618 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 5619 fp->tx_pkts_freed++; 5620 m_freem(mp); 5621 } 5622 5623 mtx_unlock(&fp->tx_mtx); 5624 5625 buf_ring_free(fp->tx_br, M_DEVBUF); 5626 fp->tx_br = NULL; 5627 } 5628 mtx_destroy(&fp->tx_mtx); 5629 } 5630 return; 5631 } 5632 5633 static void 5634 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5635 { 5636 int tc; 5637 5638 qlnx_free_mem_sb(ha, fp->sb_info); 5639 5640 qlnx_free_mem_rxq(ha, fp->rxq); 5641 5642 for (tc = 0; tc < ha->num_tc; tc++) 5643 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 5644 5645 return; 5646 } 5647 5648 static int 5649 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5650 { 5651 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 5652 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 5653 5654 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 5655 5656 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 5657 M_NOWAIT, &fp->tx_mtx); 5658 if (fp->tx_br == NULL) { 5659 QL_DPRINT1(ha, (ha->pci_dev, "buf_ring_alloc failed for " 5660 " fp[%d, %d]\n", ha->dev_unit, fp->rss_id)); 5661 return -ENOMEM; 5662 } 5663 return 0; 
5664 } 5665 5666 static int 5667 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5668 { 5669 int rc, tc; 5670 5671 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 5672 if (rc) 5673 goto err; 5674 5675 if (ha->rx_jumbo_buf_eq_mtu) { 5676 if (ha->max_frame_size <= MCLBYTES) 5677 ha->rx_buf_size = MCLBYTES; 5678 else if (ha->max_frame_size <= MJUMPAGESIZE) 5679 ha->rx_buf_size = MJUMPAGESIZE; 5680 else if (ha->max_frame_size <= MJUM9BYTES) 5681 ha->rx_buf_size = MJUM9BYTES; 5682 else if (ha->max_frame_size <= MJUM16BYTES) 5683 ha->rx_buf_size = MJUM16BYTES; 5684 } else { 5685 if (ha->max_frame_size <= MCLBYTES) 5686 ha->rx_buf_size = MCLBYTES; 5687 else 5688 ha->rx_buf_size = MJUMPAGESIZE; 5689 } 5690 5691 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 5692 if (rc) 5693 goto err; 5694 5695 for (tc = 0; tc < ha->num_tc; tc++) { 5696 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 5697 if (rc) 5698 goto err; 5699 } 5700 5701 return 0; 5702 5703 err: 5704 qlnx_free_mem_fp(ha, fp); 5705 return -ENOMEM; 5706 } 5707 5708 static void 5709 qlnx_free_mem_load(qlnx_host_t *ha) 5710 { 5711 int i; 5712 struct ecore_dev *cdev; 5713 5714 cdev = &ha->cdev; 5715 5716 for (i = 0; i < ha->num_rss; i++) { 5717 struct qlnx_fastpath *fp = &ha->fp_array[i]; 5718 5719 qlnx_free_mem_fp(ha, fp); 5720 } 5721 return; 5722 } 5723 5724 static int 5725 qlnx_alloc_mem_load(qlnx_host_t *ha) 5726 { 5727 int rc = 0, rss_id; 5728 5729 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5730 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5731 5732 rc = qlnx_alloc_mem_fp(ha, fp); 5733 if (rc) 5734 break; 5735 } 5736 return (rc); 5737 } 5738 5739 static int 5740 qlnx_start_vport(struct ecore_dev *cdev, 5741 u8 vport_id, 5742 u16 mtu, 5743 u8 drop_ttl0_flg, 5744 u8 inner_vlan_removal_en_flg, 5745 u8 tx_switching, 5746 u8 hw_lro_enable) 5747 { 5748 int rc, i; 5749 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 5750 qlnx_host_t *ha; 5751 5752 ha = (qlnx_host_t *)cdev; 5753 5754 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 5755 vport_start_params.tx_switching = 0; 5756 vport_start_params.handle_ptp_pkts = 0; 5757 vport_start_params.only_untagged = 0; 5758 vport_start_params.drop_ttl0 = drop_ttl0_flg; 5759 5760 vport_start_params.tpa_mode = 5761 (hw_lro_enable ? 
ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 5762 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 5763 5764 vport_start_params.vport_id = vport_id; 5765 vport_start_params.mtu = mtu; 5766 5767 5768 QL_DPRINT2(ha, (ha->pci_dev, "%s: setting mtu to %d\n", __func__, mtu)); 5769 5770 for_each_hwfn(cdev, i) { 5771 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5772 5773 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 5774 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5775 5776 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 5777 5778 if (rc) { 5779 QL_DPRINT1(ha, (ha->pci_dev, 5780 "%s: Failed to start VPORT V-PORT %d " 5781 "with MTU %d\n", __func__, vport_id, mtu)); 5782 return -ENOMEM; 5783 } 5784 5785 ecore_hw_start_fastpath(p_hwfn); 5786 5787 QL_DPRINT2(ha, (ha->pci_dev, 5788 "%s: Started V-PORT %d with MTU %d\n", 5789 __func__, vport_id, mtu)); 5790 } 5791 return 0; 5792 } 5793 5794 5795 static int 5796 qlnx_update_vport(struct ecore_dev *cdev, 5797 struct qlnx_update_vport_params *params) 5798 { 5799 struct ecore_sp_vport_update_params sp_params; 5800 int rc, i, j, fp_index; 5801 struct ecore_hwfn *p_hwfn; 5802 struct ecore_rss_params *rss; 5803 qlnx_host_t *ha = (qlnx_host_t *)cdev; 5804 struct qlnx_fastpath *fp; 5805 5806 memset(&sp_params, 0, sizeof(sp_params)); 5807 /* Translate protocol params into sp params */ 5808 sp_params.vport_id = params->vport_id; 5809 5810 sp_params.update_vport_active_rx_flg = 5811 params->update_vport_active_rx_flg; 5812 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 5813 5814 sp_params.update_vport_active_tx_flg = 5815 params->update_vport_active_tx_flg; 5816 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 5817 5818 sp_params.update_inner_vlan_removal_flg = 5819 params->update_inner_vlan_removal_flg; 5820 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 5821 5822 sp_params.sge_tpa_params = params->sge_tpa_params; 5823 5824 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 5825 * We need to re-fix the rss values per engine for CMT. 
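	 * (On CMT devices a single port is backed by more than one hw-function,
	 * so the indirection table is rebuilt below using only the queues that
	 * belong to each engine.)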
	 */

	sp_params.rss_params = params->rss_params;

	for_each_hwfn(cdev, i) {

		p_hwfn = &cdev->hwfns[i];

		if ((cdev->num_hwfns > 1) &&
			params->rss_params->update_rss_config &&
			params->rss_params->rss_enable) {

			rss = params->rss_params;

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) {

				fp_index = ((cdev->num_hwfns * j) + i) %
						ha->num_rss;

				fp = &ha->fp_array[fp_index];
				rss->rss_ind_table[j] = fp->rxq->handle;
			}

			for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) {
				QL_DPRINT3(ha, (ha->pci_dev,
					"%p %p %p %p %p %p %p %p \n",
					rss->rss_ind_table[j],
					rss->rss_ind_table[j+1],
					rss->rss_ind_table[j+2],
					rss->rss_ind_table[j+3],
					rss->rss_ind_table[j+4],
					rss->rss_ind_table[j+5],
					rss->rss_ind_table[j+6],
					rss->rss_ind_table[j+7]));
				j += 8;
			}
		}

		sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_vport_update(p_hwfn, &sp_params,
				ECORE_SPQ_MODE_EBLOCK, NULL);
		if (rc) {
			QL_DPRINT1(ha, (ha->pci_dev,
				"%s:Failed to update VPORT\n", __func__));
			return rc;
		}

		QL_DPRINT2(ha, (ha->pci_dev,
			"%s: Updated V-PORT %d: tx_active_flag %d,"
			"rx_active_flag %d [tx_update %d], [rx_update %d]\n",
			__func__,
			params->vport_id, params->vport_active_tx_flg,
			params->vport_active_rx_flg,
			params->update_vport_active_tx_flg,
			params->update_vport_active_rx_flg));
	}

	return 0;
}

static void
qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq)
{
	struct eth_rx_bd *rx_bd_cons =
		ecore_chain_consume(&rxq->rx_bd_ring);
	struct eth_rx_bd *rx_bd_prod =
		ecore_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *sw_rx_data_cons =
		&rxq->sw_rx_ring[rxq->sw_rx_cons];
	struct sw_rx_data *sw_rx_data_prod =
		&rxq->sw_rx_ring[rxq->sw_rx_prod];

	sw_rx_data_prod->data = sw_rx_data_cons->data;
	memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd));

	rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
	rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1);

	return;
}

static void
qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq)
{

	uint16_t bd_prod;
	uint16_t cqe_prod;
	struct eth_rx_prod_data rx_prods = {0};

	bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);

	/* Update producers */
	rx_prods.bd_prod = htole16(bd_prod);
	rx_prods.cqe_prod = htole16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();
	//bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ);
	//bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ);

	internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr,
		sizeof(rx_prods), (u32 *)&rx_prods);

	/* mmiowb is needed to synchronize doorbell writes from more than one
	 * processor. It guarantees that the write arrives to the device before
	 * the napi lock is released and another qlnx_poll is called (possibly
	 * on another CPU). Without this barrier, the next doorbell can bypass
	 * this doorbell. This is applicable to IA64/Altix systems.
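	 * On FreeBSD the wmb() below is relied upon to provide that ordering.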
5938 */ 5939 wmb(); 5940 5941 return; 5942 } 5943 5944 static uint32_t qlnx_hash_key[] = { 5945 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 5946 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 5947 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 5948 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 5949 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 5950 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 5951 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 5952 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 5953 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 5954 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 5955 5956 static int 5957 qlnx_start_queues(qlnx_host_t *ha) 5958 { 5959 int rc, tc, i, vport_id = 0, 5960 drop_ttl0_flg = 1, vlan_removal_en = 1, 5961 tx_switching = 0, hw_lro_enable = 0; 5962 struct ecore_dev *cdev = &ha->cdev; 5963 struct ecore_rss_params *rss_params = &ha->rss_params; 5964 struct qlnx_update_vport_params vport_update_params; 5965 struct ifnet *ifp; 5966 struct ecore_hwfn *p_hwfn; 5967 struct ecore_sge_tpa_params tpa_params; 5968 struct ecore_queue_start_common_params qparams; 5969 struct qlnx_fastpath *fp; 5970 5971 ifp = ha->ifp; 5972 5973 if (!ha->num_rss) { 5974 QL_DPRINT1(ha, (ha->pci_dev, 5975 "%s: Cannot update V-VPORT as active as there" 5976 " are no Rx queues\n", __func__)); 5977 return -EINVAL; 5978 } 5979 5980 #ifndef QLNX_SOFT_LRO 5981 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 5982 #endif /* #ifndef QLNX_SOFT_LRO */ 5983 5984 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 5985 vlan_removal_en, tx_switching, hw_lro_enable); 5986 5987 if (rc) { 5988 QL_DPRINT1(ha, (ha->pci_dev, 5989 "%s: Start V-PORT failed %d\n", __func__, rc)); 5990 return rc; 5991 } 5992 5993 QL_DPRINT2(ha, (ha->pci_dev, 5994 "%s: Start vport ramrod passed," 5995 " vport_id = %d, MTU = %d, vlan_removal_en = %d\n", __func__, 5996 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en)); 5997 5998 for_each_rss(i) { 5999 struct ecore_rxq_start_ret_params rx_ret_params; 6000 struct ecore_txq_start_ret_params tx_ret_params; 6001 6002 fp = &ha->fp_array[i]; 6003 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6004 6005 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6006 bzero(&rx_ret_params, 6007 sizeof (struct ecore_rxq_start_ret_params)); 6008 6009 qparams.queue_id = i ; 6010 qparams.vport_id = vport_id; 6011 qparams.stats_id = vport_id; 6012 qparams.p_sb = fp->sb_info; 6013 qparams.sb_idx = RX_PI; 6014 6015 6016 rc = ecore_eth_rx_queue_start(p_hwfn, 6017 p_hwfn->hw_info.opaque_fid, 6018 &qparams, 6019 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6020 /* bd_chain_phys_addr */ 6021 fp->rxq->rx_bd_ring.p_phys_addr, 6022 /* cqe_pbl_addr */ 6023 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6024 /* cqe_pbl_size */ 6025 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6026 &rx_ret_params); 6027 6028 if (rc) { 6029 QL_DPRINT1(ha, (ha->pci_dev, 6030 "%s: Start RXQ #%d failed %d\n", __func__, 6031 i, rc)); 6032 return rc; 6033 } 6034 6035 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6036 fp->rxq->handle = rx_ret_params.p_handle; 6037 fp->rxq->hw_cons_ptr = 6038 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6039 6040 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6041 6042 for (tc = 0; tc < ha->num_tc; tc++) { 6043 struct qlnx_tx_queue *txq = fp->txq[tc]; 6044 6045 bzero(&qparams, 6046 sizeof(struct ecore_queue_start_common_params)); 6047 bzero(&tx_ret_params, 6048 sizeof (struct ecore_txq_start_ret_params)); 6049 6050 qparams.queue_id = txq->index / cdev->num_hwfns ; 6051 
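			/*
			 * txq->index is a global queue index; each hw-function
			 * uses its own relative queue id.
			 */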
qparams.vport_id = vport_id; 6052 qparams.stats_id = vport_id; 6053 qparams.p_sb = fp->sb_info; 6054 qparams.sb_idx = TX_PI(tc); 6055 6056 rc = ecore_eth_tx_queue_start(p_hwfn, 6057 p_hwfn->hw_info.opaque_fid, 6058 &qparams, tc, 6059 /* bd_chain_phys_addr */ 6060 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6061 ecore_chain_get_page_cnt(&txq->tx_pbl), 6062 &tx_ret_params); 6063 6064 if (rc) { 6065 QL_DPRINT1(ha, (ha->pci_dev, 6066 "%s: Start TXQ #%d failed %d\n", 6067 __func__, txq->index, rc)); 6068 return rc; 6069 } 6070 6071 txq->doorbell_addr = tx_ret_params.p_doorbell; 6072 txq->handle = tx_ret_params.p_handle; 6073 6074 txq->hw_cons_ptr = 6075 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6076 SET_FIELD(txq->tx_db.data.params, 6077 ETH_DB_DATA_DEST, DB_DEST_XCM); 6078 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6079 DB_AGG_CMD_SET); 6080 SET_FIELD(txq->tx_db.data.params, 6081 ETH_DB_DATA_AGG_VAL_SEL, 6082 DQ_XCM_ETH_TX_BD_PROD_CMD); 6083 6084 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6085 } 6086 } 6087 6088 /* Fill struct with RSS params */ 6089 if (ha->num_rss > 1) { 6090 6091 rss_params->update_rss_config = 1; 6092 rss_params->rss_enable = 1; 6093 rss_params->update_rss_capabilities = 1; 6094 rss_params->update_rss_ind_table = 1; 6095 rss_params->update_rss_key = 1; 6096 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6097 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6098 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6099 6100 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6101 fp = &ha->fp_array[(i % ha->num_rss)]; 6102 rss_params->rss_ind_table[i] = fp->rxq->handle; 6103 } 6104 6105 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6106 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6107 6108 } else { 6109 memset(rss_params, 0, sizeof(*rss_params)); 6110 } 6111 6112 6113 /* Prepare and send the vport enable */ 6114 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6115 vport_update_params.vport_id = vport_id; 6116 vport_update_params.update_vport_active_tx_flg = 1; 6117 vport_update_params.vport_active_tx_flg = 1; 6118 vport_update_params.update_vport_active_rx_flg = 1; 6119 vport_update_params.vport_active_rx_flg = 1; 6120 vport_update_params.rss_params = rss_params; 6121 vport_update_params.update_inner_vlan_removal_flg = 1; 6122 vport_update_params.inner_vlan_removal_flg = 1; 6123 6124 if (hw_lro_enable) { 6125 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6126 6127 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6128 6129 tpa_params.update_tpa_en_flg = 1; 6130 tpa_params.tpa_ipv4_en_flg = 1; 6131 tpa_params.tpa_ipv6_en_flg = 1; 6132 6133 tpa_params.update_tpa_param_flg = 1; 6134 tpa_params.tpa_pkt_split_flg = 0; 6135 tpa_params.tpa_hdr_data_split_flg = 0; 6136 tpa_params.tpa_gro_consistent_flg = 0; 6137 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6138 tpa_params.tpa_max_size = (uint16_t)(-1); 6139 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6140 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6141 6142 vport_update_params.sge_tpa_params = &tpa_params; 6143 } 6144 6145 rc = qlnx_update_vport(cdev, &vport_update_params); 6146 if (rc) { 6147 QL_DPRINT1(ha, (ha->pci_dev, 6148 "%s: Update V-PORT failed %d\n", __func__, rc)); 6149 return rc; 6150 } 6151 6152 return 0; 6153 } 6154 6155 static int 6156 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6157 struct qlnx_tx_queue *txq) 6158 { 6159 uint16_t hw_bd_cons; 6160 uint16_t ecore_cons_idx; 6161 6162 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", 
__func__)); 6163 6164 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6165 6166 while (hw_bd_cons != 6167 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6168 6169 mtx_lock(&fp->tx_mtx); 6170 6171 (void)qlnx_tx_int(ha, fp, txq); 6172 6173 mtx_unlock(&fp->tx_mtx); 6174 6175 qlnx_mdelay(__func__, 2); 6176 6177 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6178 } 6179 6180 QL_DPRINT2(ha, (ha->pci_dev, "%s[%d, %d]: done\n", __func__, 6181 fp->rss_id, txq->index)); 6182 6183 return 0; 6184 } 6185 6186 static int 6187 qlnx_stop_queues(qlnx_host_t *ha) 6188 { 6189 struct qlnx_update_vport_params vport_update_params; 6190 struct ecore_dev *cdev; 6191 struct qlnx_fastpath *fp; 6192 int rc, tc, i; 6193 6194 cdev = &ha->cdev; 6195 6196 /* Disable the vport */ 6197 6198 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6199 6200 vport_update_params.vport_id = 0; 6201 vport_update_params.update_vport_active_tx_flg = 1; 6202 vport_update_params.vport_active_tx_flg = 0; 6203 vport_update_params.update_vport_active_rx_flg = 1; 6204 vport_update_params.vport_active_rx_flg = 0; 6205 vport_update_params.rss_params = &ha->rss_params; 6206 vport_update_params.rss_params->update_rss_config = 0; 6207 vport_update_params.rss_params->rss_enable = 0; 6208 vport_update_params.update_inner_vlan_removal_flg = 0; 6209 vport_update_params.inner_vlan_removal_flg = 0; 6210 6211 rc = qlnx_update_vport(cdev, &vport_update_params); 6212 if (rc) { 6213 QL_DPRINT1(ha, (ha->pci_dev, "%s:Failed to update vport\n", 6214 __func__)); 6215 return rc; 6216 } 6217 6218 /* Flush Tx queues. If needed, request drain from MCP */ 6219 for_each_rss(i) { 6220 fp = &ha->fp_array[i]; 6221 6222 for (tc = 0; tc < ha->num_tc; tc++) { 6223 struct qlnx_tx_queue *txq = fp->txq[tc]; 6224 6225 rc = qlnx_drain_txq(ha, fp, txq); 6226 if (rc) 6227 return rc; 6228 } 6229 } 6230 6231 /* Stop all Queues in reverse order*/ 6232 for (i = ha->num_rss - 1; i >= 0; i--) { 6233 6234 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6235 6236 fp = &ha->fp_array[i]; 6237 6238 /* Stop the Tx Queue(s)*/ 6239 for (tc = 0; tc < ha->num_tc; tc++) { 6240 int tx_queue_id; 6241 6242 tx_queue_id = tc * ha->num_rss + i; 6243 rc = ecore_eth_tx_queue_stop(p_hwfn, 6244 fp->txq[tc]->handle); 6245 6246 if (rc) { 6247 QL_DPRINT1(ha, (ha->pci_dev, 6248 "%s: Failed to stop TXQ #%d\n", 6249 __func__, tx_queue_id)); 6250 return rc; 6251 } 6252 } 6253 6254 /* Stop the Rx Queue*/ 6255 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6256 false); 6257 if (rc) { 6258 QL_DPRINT1(ha, (ha->pci_dev, 6259 "%s: Failed to stop RXQ #%d\n", __func__, i)); 6260 return rc; 6261 } 6262 } 6263 6264 /* Stop the vport */ 6265 for_each_hwfn(cdev, i) { 6266 6267 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6268 6269 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6270 6271 if (rc) { 6272 QL_DPRINT1(ha, (ha->pci_dev, 6273 "%s: Failed to stop VPORT\n", __func__)); 6274 return rc; 6275 } 6276 } 6277 6278 return rc; 6279 } 6280 6281 static int 6282 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6283 enum ecore_filter_opcode opcode, 6284 unsigned char mac[ETH_ALEN]) 6285 { 6286 struct ecore_filter_ucast ucast; 6287 struct ecore_dev *cdev; 6288 int rc; 6289 6290 cdev = &ha->cdev; 6291 6292 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6293 6294 ucast.opcode = opcode; 6295 ucast.type = ECORE_FILTER_MAC; 6296 ucast.is_rx_filter = 1; 6297 ucast.vport_to_add_to = 0; 6298 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6299 6300 rc = ecore_filter_ucast_cmd(cdev, &ucast, 
ECORE_SPQ_MODE_CB, NULL); 6301 6302 return (rc); 6303 } 6304 6305 static int 6306 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6307 { 6308 struct ecore_filter_ucast ucast; 6309 struct ecore_dev *cdev; 6310 int rc; 6311 6312 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6313 6314 ucast.opcode = ECORE_FILTER_REPLACE; 6315 ucast.type = ECORE_FILTER_MAC; 6316 ucast.is_rx_filter = 1; 6317 6318 cdev = &ha->cdev; 6319 6320 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6321 6322 return (rc); 6323 } 6324 6325 static int 6326 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6327 { 6328 struct ecore_filter_mcast *mcast; 6329 struct ecore_dev *cdev; 6330 int rc, i; 6331 6332 cdev = &ha->cdev; 6333 6334 mcast = &ha->ecore_mcast; 6335 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6336 6337 mcast->opcode = ECORE_FILTER_REMOVE; 6338 6339 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 6340 6341 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 6342 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 6343 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 6344 6345 memcpy(&mcast->mac[0], &ha->mcast[i].addr[0], ETH_ALEN); 6346 mcast->num_mc_addrs++; 6347 mcast++; 6348 } 6349 } 6350 mcast = &ha->ecore_mcast; 6351 6352 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 6353 6354 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 6355 ha->nmcast = 0; 6356 6357 return (rc); 6358 } 6359 6360 static int 6361 qlnx_clean_filters(qlnx_host_t *ha) 6362 { 6363 int rc = 0; 6364 6365 /* Remove all unicast macs */ 6366 qlnx_remove_all_ucast_mac(ha); 6367 if (rc) 6368 return rc; 6369 6370 /* Remove all multicast macs */ 6371 rc = qlnx_remove_all_mcast_mac(ha); 6372 if (rc) 6373 return rc; 6374 6375 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 6376 6377 return (rc); 6378 } 6379 6380 static int 6381 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 6382 { 6383 struct ecore_filter_accept_flags accept; 6384 int rc = 0; 6385 struct ecore_dev *cdev; 6386 6387 cdev = &ha->cdev; 6388 6389 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 6390 6391 accept.update_rx_mode_config = 1; 6392 accept.rx_accept_filter = filter; 6393 6394 accept.update_tx_mode_config = 1; 6395 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 6396 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 6397 6398 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 6399 ECORE_SPQ_MODE_CB, NULL); 6400 6401 return (rc); 6402 } 6403 6404 static int 6405 qlnx_set_rx_mode(qlnx_host_t *ha) 6406 { 6407 int rc = 0; 6408 uint8_t filter; 6409 6410 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 6411 if (rc) 6412 return rc; 6413 6414 rc = qlnx_remove_all_mcast_mac(ha); 6415 if (rc) 6416 return rc; 6417 6418 filter = ECORE_ACCEPT_UCAST_MATCHED | 6419 ECORE_ACCEPT_MCAST_MATCHED | 6420 ECORE_ACCEPT_BCAST; 6421 ha->filter = filter; 6422 6423 rc = qlnx_set_rx_accept_filter(ha, filter); 6424 6425 return (rc); 6426 } 6427 6428 static int 6429 qlnx_set_link(qlnx_host_t *ha, bool link_up) 6430 { 6431 int i, rc = 0; 6432 struct ecore_dev *cdev; 6433 struct ecore_hwfn *hwfn; 6434 struct ecore_ptt *ptt; 6435 6436 cdev = &ha->cdev; 6437 6438 for_each_hwfn(cdev, i) { 6439 6440 hwfn = &cdev->hwfns[i]; 6441 6442 ptt = ecore_ptt_acquire(hwfn); 6443 if (!ptt) 6444 return -EBUSY; 6445 6446 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 6447 6448 ecore_ptt_release(hwfn, ptt); 6449 6450 if (rc) 6451 return rc; 6452 } 6453 return (rc); 6454 } 6455 6456 #if 
#if __FreeBSD_version >= 1100000

static uint64_t
qlnx_get_counter(if_t ifp, ift_counter cnt)
{
        qlnx_host_t *ha;
        uint64_t count;

        ha = (qlnx_host_t *)if_getsoftc(ifp);

        switch (cnt) {

        case IFCOUNTER_IPACKETS:
                count = ha->hw_stats.common.rx_ucast_pkts +
                        ha->hw_stats.common.rx_mcast_pkts +
                        ha->hw_stats.common.rx_bcast_pkts;
                break;

        case IFCOUNTER_IERRORS:
                count = ha->hw_stats.common.rx_crc_errors +
                        ha->hw_stats.common.rx_align_errors +
                        ha->hw_stats.common.rx_oversize_packets +
                        ha->hw_stats.common.rx_undersize_packets;
                break;

        case IFCOUNTER_OPACKETS:
                count = ha->hw_stats.common.tx_ucast_pkts +
                        ha->hw_stats.common.tx_mcast_pkts +
                        ha->hw_stats.common.tx_bcast_pkts;
                break;

        case IFCOUNTER_OERRORS:
                count = ha->hw_stats.common.tx_err_drop_pkts;
                break;

        case IFCOUNTER_COLLISIONS:
                return (0);

        case IFCOUNTER_IBYTES:
                count = ha->hw_stats.common.rx_ucast_bytes +
                        ha->hw_stats.common.rx_mcast_bytes +
                        ha->hw_stats.common.rx_bcast_bytes;
                break;

        case IFCOUNTER_OBYTES:
                count = ha->hw_stats.common.tx_ucast_bytes +
                        ha->hw_stats.common.tx_mcast_bytes +
                        ha->hw_stats.common.tx_bcast_bytes;
                break;

        case IFCOUNTER_IMCASTS:
                /* multicast packet (not byte) counters */
                count = ha->hw_stats.common.rx_mcast_pkts;
                break;

        case IFCOUNTER_OMCASTS:
                count = ha->hw_stats.common.tx_mcast_pkts;
                break;

        case IFCOUNTER_IQDROPS:
        case IFCOUNTER_OQDROPS:
        case IFCOUNTER_NOPROTO:
        default:
                return (if_get_counter_default(ifp, cnt));
        }
        return (count);
}
#endif

static void
qlnx_timer(void *arg)
{
        qlnx_host_t *ha;

        ha = (qlnx_host_t *)arg;

        ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);

        if (ha->storm_stats_enable)
                qlnx_sample_storm_stats(ha);

        callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

        return;
}

static int
qlnx_load(qlnx_host_t *ha)
{
        int i;
        int rc = 0;
        struct ecore_dev *cdev;
        device_t dev;

        cdev = &ha->cdev;
        dev = ha->pci_dev;

        QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));

        rc = qlnx_alloc_mem_arrays(ha);
        if (rc)
                goto qlnx_load_exit0;

        qlnx_init_fp(ha);

        rc = qlnx_alloc_mem_load(ha);
        if (rc)
                goto qlnx_load_exit1;

        QL_DPRINT2(ha, (dev, "%s: Allocated %d RSS queues on %d TC/s\n",
                __func__, ha->num_rss, ha->num_tc));

        for (i = 0; i < ha->num_rss; i++) {

                if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
                                (INTR_TYPE_NET | INTR_MPSAFE),
                                NULL, qlnx_fp_isr, &ha->irq_vec[i],
                                &ha->irq_vec[i].handle))) {

                        QL_DPRINT1(ha, (dev,
                                "%s: could not setup interrupt\n", __func__));

                        goto qlnx_load_exit2;
                }

                QL_DPRINT2(ha, (dev, "%s: rss_id = %d irq_rid %d"
                        " irq %p handle %p\n", __func__, i,
                        ha->irq_vec[i].irq_rid,
                        ha->irq_vec[i].irq, ha->irq_vec[i].handle));

                bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
        }

        rc = qlnx_start_queues(ha);
        if (rc)
                goto qlnx_load_exit2;

        QL_DPRINT2(ha, (dev, "%s: Start VPORT, RXQ and TXQ succeeded\n",
                __func__));

        /* Add primary mac and set Rx filters */
        rc = qlnx_set_rx_mode(ha);
        if (rc)
                goto qlnx_load_exit2;

        /* Ask for link-up using current configuration */
        qlnx_set_link(ha,
true); 6602 6603 ha->state = QLNX_STATE_OPEN; 6604 6605 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 6606 6607 if (ha->flags.callout_init) 6608 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 6609 6610 goto qlnx_load_exit0; 6611 6612 qlnx_load_exit2: 6613 qlnx_free_mem_load(ha); 6614 6615 qlnx_load_exit1: 6616 ha->num_rss = 0; 6617 6618 qlnx_load_exit0: 6619 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit [%d]\n", __func__, rc)); 6620 return rc; 6621 } 6622 6623 static void 6624 qlnx_drain_soft_lro(qlnx_host_t *ha) 6625 { 6626 #ifdef QLNX_SOFT_LRO 6627 6628 struct ifnet *ifp; 6629 int i; 6630 6631 ifp = ha->ifp; 6632 6633 6634 if (ifp->if_capenable & IFCAP_LRO) { 6635 6636 for (i = 0; i < ha->num_rss; i++) { 6637 6638 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6639 struct lro_ctrl *lro; 6640 6641 lro = &fp->rxq->lro; 6642 6643 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 6644 6645 tcp_lro_flush_all(lro); 6646 6647 #else 6648 struct lro_entry *queued; 6649 6650 while ((!SLIST_EMPTY(&lro->lro_active))){ 6651 queued = SLIST_FIRST(&lro->lro_active); 6652 SLIST_REMOVE_HEAD(&lro->lro_active, next); 6653 tcp_lro_flush(lro, queued); 6654 } 6655 6656 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 6657 6658 } 6659 } 6660 6661 #endif /* #ifdef QLNX_SOFT_LRO */ 6662 6663 return; 6664 } 6665 6666 static void 6667 qlnx_unload(qlnx_host_t *ha) 6668 { 6669 struct ecore_dev *cdev; 6670 device_t dev; 6671 int i; 6672 6673 cdev = &ha->cdev; 6674 dev = ha->pci_dev; 6675 6676 QL_DPRINT2(ha, (ha->pci_dev, "%s: enter\n", __func__)); 6677 6678 if (ha->state == QLNX_STATE_OPEN) { 6679 6680 qlnx_set_link(ha, false); 6681 qlnx_clean_filters(ha); 6682 qlnx_stop_queues(ha); 6683 ecore_hw_stop_fastpath(cdev); 6684 6685 for (i = 0; i < ha->num_rss; i++) { 6686 if (ha->irq_vec[i].handle) { 6687 (void)bus_teardown_intr(dev, 6688 ha->irq_vec[i].irq, 6689 ha->irq_vec[i].handle); 6690 ha->irq_vec[i].handle = NULL; 6691 } 6692 } 6693 6694 qlnx_drain_fp_taskqueues(ha); 6695 qlnx_drain_soft_lro(ha); 6696 qlnx_free_mem_load(ha); 6697 } 6698 6699 if (ha->flags.callout_init) 6700 callout_drain(&ha->qlnx_callout); 6701 6702 qlnx_mdelay(__func__, 1000); 6703 6704 ha->state = QLNX_STATE_CLOSED; 6705 6706 QL_DPRINT2(ha, (ha->pci_dev, "%s: exit\n", __func__)); 6707 return; 6708 } 6709 6710 static int 6711 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 6712 { 6713 int rval = -1; 6714 struct ecore_hwfn *p_hwfn; 6715 struct ecore_ptt *p_ptt; 6716 6717 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 6718 6719 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 6720 p_ptt = ecore_ptt_acquire(p_hwfn); 6721 6722 if (!p_ptt) { 6723 QL_DPRINT1(ha, (ha->pci_dev, "%s: ecore_ptt_acquire failed\n", 6724 __func__)); 6725 return (rval); 6726 } 6727 6728 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 6729 6730 if (rval == DBG_STATUS_OK) 6731 rval = 0; 6732 else { 6733 QL_DPRINT1(ha, (ha->pci_dev, 6734 "%s : ecore_dbg_grc_get_dump_buf_size failed [0x%x]\n", 6735 __func__, rval)); 6736 } 6737 6738 ecore_ptt_release(p_hwfn, p_ptt); 6739 6740 return (rval); 6741 } 6742 6743 static int 6744 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 6745 { 6746 int rval = -1; 6747 struct ecore_hwfn *p_hwfn; 6748 struct ecore_ptt *p_ptt; 6749 6750 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 6751 6752 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 6753 p_ptt = ecore_ptt_acquire(p_hwfn); 6754 6755 if (!p_ptt) { 6756 QL_DPRINT1(ha, (ha->pci_dev, "%s: 
ecore_ptt_acquire failed\n", 6757 __func__)); 6758 return (rval); 6759 } 6760 6761 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 6762 6763 if (rval == DBG_STATUS_OK) 6764 rval = 0; 6765 else { 6766 QL_DPRINT1(ha, (ha->pci_dev, "%s : " 6767 "ecore_dbg_idle_chk_get_dump_buf_size failed [0x%x]\n", 6768 __func__, rval)); 6769 } 6770 6771 ecore_ptt_release(p_hwfn, p_ptt); 6772 6773 return (rval); 6774 } 6775 6776 6777 static void 6778 qlnx_sample_storm_stats(qlnx_host_t *ha) 6779 { 6780 int i, index; 6781 struct ecore_dev *cdev; 6782 qlnx_storm_stats_t *s_stats; 6783 uint32_t reg; 6784 struct ecore_ptt *p_ptt; 6785 struct ecore_hwfn *hwfn; 6786 6787 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 6788 ha->storm_stats_enable = 0; 6789 return; 6790 } 6791 6792 cdev = &ha->cdev; 6793 6794 for_each_hwfn(cdev, i) { 6795 6796 hwfn = &cdev->hwfns[i]; 6797 6798 p_ptt = ecore_ptt_acquire(hwfn); 6799 if (!p_ptt) 6800 return; 6801 6802 index = ha->storm_stats_index + 6803 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 6804 6805 s_stats = &ha->storm_stats[index]; 6806 6807 /* XSTORM */ 6808 reg = XSEM_REG_FAST_MEMORY + 6809 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 6810 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 6811 6812 reg = XSEM_REG_FAST_MEMORY + 6813 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 6814 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 6815 6816 reg = XSEM_REG_FAST_MEMORY + 6817 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 6818 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 6819 6820 reg = XSEM_REG_FAST_MEMORY + 6821 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 6822 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 6823 6824 /* YSTORM */ 6825 reg = YSEM_REG_FAST_MEMORY + 6826 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 6827 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 6828 6829 reg = YSEM_REG_FAST_MEMORY + 6830 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 6831 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 6832 6833 reg = YSEM_REG_FAST_MEMORY + 6834 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 6835 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 6836 6837 reg = YSEM_REG_FAST_MEMORY + 6838 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 6839 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 6840 6841 /* PSTORM */ 6842 reg = PSEM_REG_FAST_MEMORY + 6843 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 6844 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 6845 6846 reg = PSEM_REG_FAST_MEMORY + 6847 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 6848 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 6849 6850 reg = PSEM_REG_FAST_MEMORY + 6851 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 6852 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 6853 6854 reg = PSEM_REG_FAST_MEMORY + 6855 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 6856 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 6857 6858 /* TSTORM */ 6859 reg = TSEM_REG_FAST_MEMORY + 6860 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 6861 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 6862 6863 reg = TSEM_REG_FAST_MEMORY + 6864 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 6865 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 6866 6867 reg = TSEM_REG_FAST_MEMORY + 6868 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 6869 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 6870 6871 reg = TSEM_REG_FAST_MEMORY + 6872 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 6873 s_stats->tstorm_inactive_cycles = 
ecore_rd(hwfn, p_ptt, reg); 6874 6875 /* MSTORM */ 6876 reg = MSEM_REG_FAST_MEMORY + 6877 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 6878 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 6879 6880 reg = MSEM_REG_FAST_MEMORY + 6881 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 6882 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 6883 6884 reg = MSEM_REG_FAST_MEMORY + 6885 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 6886 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 6887 6888 reg = MSEM_REG_FAST_MEMORY + 6889 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 6890 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 6891 6892 /* USTORM */ 6893 reg = USEM_REG_FAST_MEMORY + 6894 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 6895 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 6896 6897 reg = USEM_REG_FAST_MEMORY + 6898 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 6899 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 6900 6901 reg = USEM_REG_FAST_MEMORY + 6902 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 6903 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 6904 6905 reg = USEM_REG_FAST_MEMORY + 6906 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 6907 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 6908 6909 ecore_ptt_release(hwfn, p_ptt); 6910 } 6911 6912 ha->storm_stats_index++; 6913 6914 return; 6915 } 6916 6917 /* 6918 * Name: qlnx_dump_buf8 6919 * Function: dumps a buffer as bytes 6920 */ 6921 static void 6922 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 6923 { 6924 device_t dev; 6925 uint32_t i = 0; 6926 uint8_t *buf; 6927 6928 dev = ha->pci_dev; 6929 buf = dbuf; 6930 6931 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 6932 6933 while (len >= 16) { 6934 device_printf(dev,"0x%08x:" 6935 " %02x %02x %02x %02x %02x %02x %02x %02x" 6936 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 6937 buf[0], buf[1], buf[2], buf[3], 6938 buf[4], buf[5], buf[6], buf[7], 6939 buf[8], buf[9], buf[10], buf[11], 6940 buf[12], buf[13], buf[14], buf[15]); 6941 i += 16; 6942 len -= 16; 6943 buf += 16; 6944 } 6945 switch (len) { 6946 case 1: 6947 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 6948 break; 6949 case 2: 6950 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 6951 break; 6952 case 3: 6953 device_printf(dev,"0x%08x: %02x %02x %02x\n", 6954 i, buf[0], buf[1], buf[2]); 6955 break; 6956 case 4: 6957 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 6958 buf[0], buf[1], buf[2], buf[3]); 6959 break; 6960 case 5: 6961 device_printf(dev,"0x%08x:" 6962 " %02x %02x %02x %02x %02x\n", i, 6963 buf[0], buf[1], buf[2], buf[3], buf[4]); 6964 break; 6965 case 6: 6966 device_printf(dev,"0x%08x:" 6967 " %02x %02x %02x %02x %02x %02x\n", i, 6968 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 6969 break; 6970 case 7: 6971 device_printf(dev,"0x%08x:" 6972 " %02x %02x %02x %02x %02x %02x %02x\n", i, 6973 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 6974 break; 6975 case 8: 6976 device_printf(dev,"0x%08x:" 6977 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 6978 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 6979 buf[7]); 6980 break; 6981 case 9: 6982 device_printf(dev,"0x%08x:" 6983 " %02x %02x %02x %02x %02x %02x %02x %02x" 6984 " %02x\n", i, 6985 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 6986 buf[7], buf[8]); 6987 break; 6988 case 10: 6989 device_printf(dev,"0x%08x:" 6990 " %02x %02x %02x %02x %02x %02x %02x %02x" 6991 " %02x %02x\n", i, 6992 buf[0], buf[1], buf[2], 
buf[3], buf[4], buf[5], buf[6], 6993 buf[7], buf[8], buf[9]); 6994 break; 6995 case 11: 6996 device_printf(dev,"0x%08x:" 6997 " %02x %02x %02x %02x %02x %02x %02x %02x" 6998 " %02x %02x %02x\n", i, 6999 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7000 buf[7], buf[8], buf[9], buf[10]); 7001 break; 7002 case 12: 7003 device_printf(dev,"0x%08x:" 7004 " %02x %02x %02x %02x %02x %02x %02x %02x" 7005 " %02x %02x %02x %02x\n", i, 7006 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7007 buf[7], buf[8], buf[9], buf[10], buf[11]); 7008 break; 7009 case 13: 7010 device_printf(dev,"0x%08x:" 7011 " %02x %02x %02x %02x %02x %02x %02x %02x" 7012 " %02x %02x %02x %02x %02x\n", i, 7013 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7014 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7015 break; 7016 case 14: 7017 device_printf(dev,"0x%08x:" 7018 " %02x %02x %02x %02x %02x %02x %02x %02x" 7019 " %02x %02x %02x %02x %02x %02x\n", i, 7020 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7021 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7022 buf[13]); 7023 break; 7024 case 15: 7025 device_printf(dev,"0x%08x:" 7026 " %02x %02x %02x %02x %02x %02x %02x %02x" 7027 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7028 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7029 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7030 buf[13], buf[14]); 7031 break; 7032 default: 7033 break; 7034 } 7035 7036 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7037 7038 return; 7039 } 7040 7041
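/*
 * The residual (len < 16) tail of qlnx_dump_buf8() is unrolled into an
 * explicit switch so each partial line is emitted with a single
 * device_printf() call instead of building a format string at run time.
 *
 * Usage sketch (hypothetical call site, not in the original source; "mp"
 * stands for some struct mbuf * in scope):
 *
 *      qlnx_dump_buf8(ha, "rx mbuf", mtod(mp, void *), mp->m_len);
 */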