/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dev_api.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
        struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int
qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
        struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
        char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
        uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
        uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
        struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
        int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
        struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);

static device_method_t qlnx_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),
    { 0, 0 }
};

static driver_t qlnx_pci_driver = {
    "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[64];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC	0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

static int
qlnx_valid_device(device_t dev)
{
    uint16_t device_id;

    device_id = pci_get_device(dev);

    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654))
        return 0;

    return -1;
}

/*
 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLE45xxx adapter
 */
static int
qlnx_pci_probe(device_t dev)
{
    snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
    snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

    if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
        return (ENXIO);
    }

    switch (pci_get_device(dev)) {
    case QLOGIC_PCI_DEVICE_ID_1644:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1634:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1656:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    case QLOGIC_PCI_DEVICE_ID_1654:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);
        break;

    default:
        return (ENXIO);
    }

    return (BUS_PROBE_DEFAULT);
}

static void
qlnx_sp_intr(void *arg)
{
    struct ecore_hwfn *p_hwfn;
    qlnx_host_t *ha;
    int i;

    p_hwfn = arg;

    if (p_hwfn == NULL) {
        printf("%s: spurious slowpath intr\n", __func__);
        return;
    }

    ha = (qlnx_host_t *)p_hwfn->p_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (&ha->cdev.hwfns[i] == p_hwfn) {
            taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
            break;
        }
    }
    QL_DPRINT2(ha, "exit\n");

    return;
}

static void
qlnx_sp_taskqueue(void *context, int pending)
{
    struct ecore_hwfn *p_hwfn;

    p_hwfn = context;

    if (p_hwfn != NULL) {
        qlnx_sp_isr(p_hwfn);
    }
    return;
}

static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

        TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

        ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

        if (ha->sp_taskqueue[i] == NULL)
            return (-1);

        taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
    }

    return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
    int i;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_taskqueue[i] != NULL) {
            taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
            taskqueue_free(ha->sp_taskqueue[i]);
        }
    }
    return;
}

static void
qlnx_fp_taskqueue(void *context, int pending)
{
    struct qlnx_fastpath *fp;
    qlnx_host_t *ha;
    struct ifnet *ifp;
    struct mbuf *mp;
    int ret;
    int lro_enable, tc;
    int rx_int = 0, total_rx_count = 0;
    struct thread *cthread;

    fp = context;

    if (fp == NULL)
        return;

    cthread = curthread;

    thread_lock(cthread);

    if (!sched_is_bound(cthread))
        sched_bind(cthread, fp->rss_id);

    thread_unlock(cthread);

    ha = (qlnx_host_t *)fp->edev;

    ifp = ha->ifp;

    lro_enable = ha->ifp->if_capenable & IFCAP_LRO;

    rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable);

    if (rx_int) {
        fp->rx_pkts += rx_int;
        total_rx_count += rx_int;
    }

#ifdef QLNX_SOFT_LRO
    {
        struct lro_ctrl *lro;

        lro = &fp->rxq->lro;

        if (lro_enable && total_rx_count) {

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

            if (ha->dbg_trace_lro_cnt) {
                if (lro->lro_mbuf_count & ~1023)
                    fp->lro_cnt_1024++;
                else if (lro->lro_mbuf_count & ~511)
                    fp->lro_cnt_512++;
                else if (lro->lro_mbuf_count & ~255)
                    fp->lro_cnt_256++;
                else if (lro->lro_mbuf_count & ~127)
                    fp->lro_cnt_128++;
                else if (lro->lro_mbuf_count & ~63)
                    fp->lro_cnt_64++;
            }
            tcp_lro_flush_all(lro);

#else
            struct lro_entry *queued;

            while ((!SLIST_EMPTY(&lro->lro_active))) {
                queued = SLIST_FIRST(&lro->lro_active);
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
            }
#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
        }
    }
#endif /* #ifdef QLNX_SOFT_LRO */

    ecore_sb_update_sb_idx(fp->sb_info);
    rmb();

    mtx_lock(&fp->tx_mtx);

    if (((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING) || (!ha->link_up)) {

        mtx_unlock(&fp->tx_mtx);
        goto qlnx_fp_taskqueue_exit;
    }

    for (tc = 0; tc < ha->num_tc; tc++) {
        (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
    }

    mp = drbr_peek(ifp, fp->tx_br);

    while (mp != NULL) {

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            ret = qlnx_send(ha, fp, &mp);
        } else {
            ret = -1;
        }

        if (ret) {
            if (mp != NULL) {
                drbr_putback(ifp, fp->tx_br, mp);
            } else {
                fp->tx_pkts_processed++;
                drbr_advance(ifp, fp->tx_br);
            }

            mtx_unlock(&fp->tx_mtx);

            goto qlnx_fp_taskqueue_exit;

        } else {
            drbr_advance(ifp, fp->tx_br);
            fp->tx_pkts_transmitted++;
            fp->tx_pkts_processed++;
        }

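        /* Tx ring filled up during the send above; stop draining the buf_ring */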
        if (fp->tx_ring_full)
            break;

        mp = drbr_peek(ifp, fp->tx_br);
    }

    for (tc = 0; tc < ha->num_tc; tc++) {
        (void)qlnx_tx_int(ha, fp, fp->txq[tc]);
    }

    mtx_unlock(&fp->tx_mtx);

qlnx_fp_taskqueue_exit:
    if (rx_int) {
        if (fp->fp_taskqueue != NULL)
            taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
    } else {
        if (fp->tx_ring_full) {
            qlnx_mdelay(__func__, 100);
        }
        ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
    }

    QL_DPRINT2(ha, "exit ret = %d\n", ret);
    return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

        TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue,
            &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
    }

    return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }
    return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            QLNX_UNLOCK(ha);
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            QLNX_LOCK(ha);
        }
    }
    return;
}

/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
    qlnx_host_t *ha = NULL;
    uint32_t rsrc_len_reg = 0;
    uint32_t rsrc_len_dbells = 0;
    uint32_t rsrc_len_msix = 0;
    int i;
    uint32_t mfw_ver;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qlnx_host_t));

    if (qlnx_valid_device(dev) != 0) {
        device_printf(dev, "device is not valid device\n");
        return (ENXIO);
    }
    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

    ha->flags.lock_init = 1;

    pci_enable_busmaster(dev);

    /*
     * map the PCI BARs
     */

    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
        RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map BAR0\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->reg_rid);

    ha->dbells_rid = PCIR_BAR(2);
    ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->dbells_rid, RF_ACTIVE);

    if (ha->pci_dbells == NULL) {
        device_printf(dev, "unable to map BAR1\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->dbells_rid);

    ha->dbells_phys_addr = (uint64_t)
        bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
    ha->dbells_size = rsrc_len_dbells;

    ha->msix_rid = PCIR_BAR(4);
    ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->msix_rid, RF_ACTIVE);

    if (ha->msix_bar == NULL) {
        device_printf(dev, "unable to map BAR2\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->msix_rid);

    /*
     * allocate dma tags
     */

    if (qlnx_alloc_parent_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_tx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_rx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_init_hw(ha) != 0)
        goto qlnx_pci_attach_err;

    /*
     * Allocate MSI-x vectors
     */
    ha->num_rss = QLNX_MAX_RSS;
    ha->num_tc = QLNX_MAX_TC;

    ha->msix_count = pci_msix_count(dev);

    if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
        ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;

    if (!ha->msix_count ||
        (ha->msix_count < (ha->cdev.num_hwfns + 1))) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns))
        ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
    else
        ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;

    QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
        "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
        "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
        "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
        ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        ha->msix_count = 0;
        goto qlnx_pci_attach_err;
    }

    /*
     * Initialize slow path interrupt and task queue
     */
    if (qlnx_create_sp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        ha->sp_irq_rid[i] = i + 1;
        ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->sp_irq_rid[i],
            (RF_ACTIVE | RF_SHAREABLE));
        if (ha->sp_irq[i] == NULL) {
            device_printf(dev,
                "could not allocate mbx interrupt\n");
            goto qlnx_pci_attach_err;
        }

        if (bus_setup_intr(dev, ha->sp_irq[i],
            (INTR_TYPE_NET | INTR_MPSAFE), NULL,
            qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
            device_printf(dev,
                "could not setup slow path interrupt\n");
            goto qlnx_pci_attach_err;
        }

        QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
            " sp_irq %p sp_handle %p\n", p_hwfn,
            ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
    }

    /*
     * initialize fast path interrupt
     */
    if (qlnx_create_fp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->num_rss; i++) {
        ha->irq_vec[i].rss_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->irq_vec[i].irq_rid,
            (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev,
"could not allocate interrupt[%d]\n", i); 779 goto qlnx_pci_attach_err; 780 } 781 782 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 783 device_printf(dev, "could not allocate tx_br[%d]\n", i); 784 goto qlnx_pci_attach_err; 785 786 } 787 } 788 789 callout_init(&ha->qlnx_callout, 1); 790 ha->flags.callout_init = 1; 791 792 for (i = 0; i < ha->cdev.num_hwfns; i++) { 793 794 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 795 goto qlnx_pci_attach_err; 796 if (ha->grcdump_size[i] == 0) 797 goto qlnx_pci_attach_err; 798 799 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 800 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 801 i, ha->grcdump_size[i]); 802 803 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 804 if (ha->grcdump[i] == NULL) { 805 device_printf(dev, "grcdump alloc[%d] failed\n", i); 806 goto qlnx_pci_attach_err; 807 } 808 809 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 810 goto qlnx_pci_attach_err; 811 if (ha->idle_chk_size[i] == 0) 812 goto qlnx_pci_attach_err; 813 814 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 815 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 816 i, ha->idle_chk_size[i]); 817 818 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 819 820 if (ha->idle_chk[i] == NULL) { 821 device_printf(dev, "idle_chk alloc failed\n"); 822 goto qlnx_pci_attach_err; 823 } 824 } 825 826 if (qlnx_slowpath_start(ha) != 0) { 827 828 qlnx_mdelay(__func__, 1000); 829 qlnx_trigger_dump(ha); 830 831 goto qlnx_pci_attach_err0; 832 } else 833 ha->flags.slowpath_start = 1; 834 835 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 836 qlnx_mdelay(__func__, 1000); 837 qlnx_trigger_dump(ha); 838 839 goto qlnx_pci_attach_err0; 840 } 841 842 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 843 qlnx_mdelay(__func__, 1000); 844 qlnx_trigger_dump(ha); 845 846 goto qlnx_pci_attach_err0; 847 } 848 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 849 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 850 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 851 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 852 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 853 FW_ENGINEERING_VERSION); 854 855 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 856 ha->stormfw_ver, ha->mfw_ver); 857 858 qlnx_init_ifnet(dev, ha); 859 860 /* 861 * add sysctls 862 */ 863 qlnx_add_sysctls(ha); 864 865 qlnx_pci_attach_err0: 866 /* 867 * create ioctl device interface 868 */ 869 if (qlnx_make_cdev(ha)) { 870 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 871 goto qlnx_pci_attach_err; 872 } 873 874 QL_DPRINT2(ha, "success\n"); 875 876 return (0); 877 878 qlnx_pci_attach_err: 879 880 qlnx_release(ha); 881 882 return (ENXIO); 883 } 884 885 /* 886 * Name: qlnx_pci_detach 887 * Function: Unhooks the device from the operating system 888 */ 889 static int 890 qlnx_pci_detach(device_t dev) 891 { 892 qlnx_host_t *ha = NULL; 893 894 if ((ha = device_get_softc(dev)) == NULL) { 895 device_printf(dev, "cannot get softc\n"); 896 return (ENOMEM); 897 } 898 899 QLNX_LOCK(ha); 900 qlnx_stop(ha); 901 QLNX_UNLOCK(ha); 902 903 qlnx_release(ha); 904 905 return (0); 906 } 907 908 static int 909 qlnx_init_hw(qlnx_host_t *ha) 910 { 911 int rval = 0; 912 struct ecore_hw_prepare_params params; 913 914 ecore_init_struct(&ha->cdev); 915 916 /* ha->dp_module = ECORE_MSG_PROBE | 917 ECORE_MSG_INTR | 918 ECORE_MSG_SP | 919 ECORE_MSG_LINK | 920 ECORE_MSG_SPQ | 921 ECORE_MSG_RDMA; 922 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 923 ha->dp_level = ECORE_LEVEL_NOTICE; 924 925 
    ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

    ha->cdev.regview = ha->pci_reg;
    ha->cdev.doorbells = ha->pci_dbells;
    ha->cdev.db_phys_addr = ha->dbells_phys_addr;
    ha->cdev.db_size = ha->dbells_size;

    bzero(&params, sizeof (struct ecore_hw_prepare_params));

    ha->personality = ECORE_PCI_DEFAULT;

    params.personality = ha->personality;

    params.drv_resc_alloc = false;
    params.chk_reg_fifo = false;
    params.initiate_pf_flr = true;
    params.epoch = 0;

    ecore_hw_prepare(&ha->cdev, &params);

    qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

    return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
        if (ha->idle_chk[i] != NULL) {
            free(ha->idle_chk[i], M_QLNXBUF);
            ha->idle_chk[i] = NULL;
        }

        if (ha->grcdump[i] != NULL) {
            free(ha->grcdump[i], M_QLNXBUF);
            ha->grcdump[i] = NULL;
        }
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    if (ha->flags.slowpath_start) {
        qlnx_slowpath_stop(ha);
    }

    ecore_hw_remove(&ha->cdev);

    qlnx_del_cdev(ha);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qlnx_free_tx_dma_tag(ha);

    qlnx_free_rx_dma_tag(ha);

    qlnx_free_parent_dma_tag(ha);

    for (i = 0; i < ha->num_rss; i++) {
        struct qlnx_fastpath *fp = &ha->fp_array[i];

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qlnx_free_tx_br(ha, fp);
    }
    qlnx_destroy_fp_taskqueues(ha);

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_handle[i])
            (void)bus_teardown_intr(dev, ha->sp_irq[i],
                ha->sp_handle[i]);

        if (ha->sp_irq[i])
            (void) bus_release_resource(dev, SYS_RES_IRQ,
                ha->sp_irq_rid[i], ha->sp_irq[i]);
    }

    qlnx_destroy_sp_taskqueues(ha);

    if (ha->msix_count)
        pci_release_msi(dev);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->hw_lock);
    }

    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
            ha->pci_reg);

    if (ha->pci_dbells)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
            ha->pci_dbells);

    if (ha->msix_bar)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
            ha->msix_bar);

    QL_DPRINT2(ha, "exit\n");
    return;
}

static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
    int i;

    if (ha->ifp != NULL)
        ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
        qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
    int err, ret = 0;
    qlnx_host_t *ha;

    err = sysctl_handle_int(oidp, &ret, 0, req);

    if (err || !req->newptr)
        return (err);

    if (ret == 1) {
        ha = (qlnx_host_t *)arg1;
        qlnx_trigger_dump(ha);
    }
    return (err);
}

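/*
 * Name: qlnx_set_tx_coalesce
 * Function: Sysctl handler; applies a Tx interrupt coalescing interval
 *	(1 - 255 usecs) to every Tx queue via ecore_set_queue_coalesce()
 */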
static int
qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    for (i = 0; i < ha->num_rss; i++) {

        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->txq[0]->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, 0,
                (uint16_t)usecs, fp->txq[0]->handle);
        }
    }

    if (!ret)
        ha->tx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}

static int
qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS)
{
    int err, i, ret = 0, usecs = 0;
    qlnx_host_t *ha;
    struct ecore_hwfn *p_hwfn;
    struct qlnx_fastpath *fp;

    err = sysctl_handle_int(oidp, &usecs, 0, req);

    if (err || !req->newptr || !usecs || (usecs > 255))
        return (err);

    ha = (qlnx_host_t *)arg1;

    for (i = 0; i < ha->num_rss; i++) {

        p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)];

        fp = &ha->fp_array[i];

        if (fp->rxq->handle != NULL) {
            ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs,
                0, fp->rxq->handle);
        }
    }

    if (!ret)
        ha->rx_coalesce_usecs = (uint8_t)usecs;

    return (err);
}

static void
qlnx_add_sp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat",
        CTLFLAG_RD, NULL, "spstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "sp_interrupts",
        CTLFLAG_RD, &ha->sp_interrupts,
        "No. of slowpath interrupts");

    return;
}

static void
qlnx_add_fp_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid_list *node_children;
    struct sysctl_oid *ctx_oid;
    int i, j;
    uint8_t name_str[16];

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat",
        CTLFLAG_RD, NULL, "fpstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    for (i = 0; i < ha->num_rss; i++) {

        bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
        snprintf(name_str, sizeof(name_str), "%d", i);

        ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str,
            CTLFLAG_RD, NULL, name_str);
        node_children = SYSCTL_CHILDREN(ctx_oid);

        /* Tx Related */

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_processed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed,
            "No. of packets processed for transmission");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_freed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
            "No. of freed packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_transmitted",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
            "No. of transmitted packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_pkts_completed",
            CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
            "No. of transmit completions");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_lso_wnd_min_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len,
            "tx_lso_wnd_min_len");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].tx_defrag,
            "tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left,
            "tx_nsegs_gt_elem_left");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs,
            ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_nsegs",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs,
            ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_max_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len,
            ha->fp_array[i].tx_tso_max_pkt_len,
            "tx_tso_max_pkt_len");

        SYSCTL_ADD_UINT(ctx, node_children,
            OID_AUTO, "tx_tso_min_pkt_len",
            CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len,
            ha->fp_array[i].tx_tso_min_pkt_len,
            "tx_tso_min_pkt_len");

        for (j = 0; j < QLNX_FP_MAX_SEGS; j++) {

            bzero(name_str, (sizeof(uint8_t) * sizeof(name_str)));
            snprintf(name_str, sizeof(name_str),
                "tx_pkts_nseg_%02d", (j+1));

            SYSCTL_ADD_QUAD(ctx, node_children,
                OID_AUTO, name_str, CTLFLAG_RD,
                &ha->fp_array[i].tx_pkts[j], name_str);
        }

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_nsegs_gt_elem_left",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left,
            "err_tx_nsegs_gt_elem_left");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_create",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create,
            "err_tx_dmamap_create");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load,
            "err_tx_defrag_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_non_tso_max_seg",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg,
            "err_tx_non_tso_max_seg");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_dmamap_load",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load,
            "err_tx_dmamap_load");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_defrag",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag,
            "err_tx_defrag");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_free_pkt_null",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null,
            "err_tx_free_pkt_null");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_tx_cons_idx_conflict",
            CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict,
            "err_tx_cons_idx_conflict");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_64",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64,
            "lro_cnt_64");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_128",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128,
            "lro_cnt_128");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_256",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256,
            "lro_cnt_256");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_512",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512,
            "lro_cnt_512");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "lro_cnt_1024",
            CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024,
            "lro_cnt_1024");

        /* Rx Related */

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "rx_pkts",
            CTLFLAG_RD, &ha->fp_array[i].rx_pkts,
            "No. of received packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_start",
            CTLFLAG_RD, &ha->fp_array[i].tpa_start,
            "No. of tpa_start packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_cont",
            CTLFLAG_RD, &ha->fp_array[i].tpa_cont,
            "No. of tpa_cont packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "tpa_end",
            CTLFLAG_RD, &ha->fp_array[i].tpa_end,
            "No. of tpa_end packets");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getcl,
            "err_m_getcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_m_getjcl",
            CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl,
            "err_m_getjcl");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_hw_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors,
            "err_rx_hw_errors");

        SYSCTL_ADD_QUAD(ctx, node_children,
            OID_AUTO, "err_rx_alloc_errors",
            CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors,
            "err_rx_alloc_errors");
    }

    return;
}

static void
qlnx_add_hw_stats_sysctls(qlnx_host_t *ha)
{
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;
    struct sysctl_oid *ctx_oid;

    ctx = device_get_sysctl_ctx(ha->pci_dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev));

    ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat",
        CTLFLAG_RD, NULL, "hwstat");
    children = SYSCTL_CHILDREN(ctx_oid);

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "no_buff_discards",
        CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards,
        "No. of packets discarded due to lack of buffer");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "packet_too_big_discard",
        CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard,
        "No. of packets discarded because packet was too big");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "ttl0_discard",
        CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard,
        "ttl0_discard");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes,
        "rx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes,
        "rx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes,
        "rx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts,
        "rx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts,
        "rx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts,
        "rx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mftag_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards,
        "mftag_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "mac_filter_discards",
        CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards,
        "mac_filter_discards");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes,
        "tx_ucast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes,
        "tx_mcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes,
        "tx_bcast_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_ucast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts,
        "tx_ucast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts,
        "tx_mcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_bcast_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts,
        "tx_bcast_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_err_drop_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts,
        "tx_err_drop_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts,
        "tpa_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_events",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events,
        "tpa_coalesced_events");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_aborts_num",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num,
        "tpa_aborts_num");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_not_coalesced_pkts",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts,
        "tpa_not_coalesced_pkts");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tpa_coalesced_bytes",
        CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes,
        "tpa_coalesced_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "rx_64_byte_packets",
        CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets,
        "rx_64_byte_packets");

    SYSCTL_ADD_QUAD(ctx, children,
"rx_65_to_127_byte_packets", 1514 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1515 "rx_65_to_127_byte_packets"); 1516 1517 SYSCTL_ADD_QUAD(ctx, children, 1518 OID_AUTO, "rx_128_to_255_byte_packets", 1519 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1520 "rx_128_to_255_byte_packets"); 1521 1522 SYSCTL_ADD_QUAD(ctx, children, 1523 OID_AUTO, "rx_256_to_511_byte_packets", 1524 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1525 "rx_256_to_511_byte_packets"); 1526 1527 SYSCTL_ADD_QUAD(ctx, children, 1528 OID_AUTO, "rx_512_to_1023_byte_packets", 1529 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1530 "rx_512_to_1023_byte_packets"); 1531 1532 SYSCTL_ADD_QUAD(ctx, children, 1533 OID_AUTO, "rx_1024_to_1518_byte_packets", 1534 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1535 "rx_1024_to_1518_byte_packets"); 1536 1537 SYSCTL_ADD_QUAD(ctx, children, 1538 OID_AUTO, "rx_1519_to_1522_byte_packets", 1539 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1540 "rx_1519_to_1522_byte_packets"); 1541 1542 SYSCTL_ADD_QUAD(ctx, children, 1543 OID_AUTO, "rx_1523_to_2047_byte_packets", 1544 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1545 "rx_1523_to_2047_byte_packets"); 1546 1547 SYSCTL_ADD_QUAD(ctx, children, 1548 OID_AUTO, "rx_2048_to_4095_byte_packets", 1549 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1550 "rx_2048_to_4095_byte_packets"); 1551 1552 SYSCTL_ADD_QUAD(ctx, children, 1553 OID_AUTO, "rx_4096_to_9216_byte_packets", 1554 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1555 "rx_4096_to_9216_byte_packets"); 1556 1557 SYSCTL_ADD_QUAD(ctx, children, 1558 OID_AUTO, "rx_9217_to_16383_byte_packets", 1559 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1560 "rx_9217_to_16383_byte_packets"); 1561 1562 SYSCTL_ADD_QUAD(ctx, children, 1563 OID_AUTO, "rx_crc_errors", 1564 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1565 "rx_crc_errors"); 1566 1567 SYSCTL_ADD_QUAD(ctx, children, 1568 OID_AUTO, "rx_mac_crtl_frames", 1569 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1570 "rx_mac_crtl_frames"); 1571 1572 SYSCTL_ADD_QUAD(ctx, children, 1573 OID_AUTO, "rx_pause_frames", 1574 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1575 "rx_pause_frames"); 1576 1577 SYSCTL_ADD_QUAD(ctx, children, 1578 OID_AUTO, "rx_pfc_frames", 1579 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1580 "rx_pfc_frames"); 1581 1582 SYSCTL_ADD_QUAD(ctx, children, 1583 OID_AUTO, "rx_align_errors", 1584 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1585 "rx_align_errors"); 1586 1587 SYSCTL_ADD_QUAD(ctx, children, 1588 OID_AUTO, "rx_carrier_errors", 1589 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1590 "rx_carrier_errors"); 1591 1592 SYSCTL_ADD_QUAD(ctx, children, 1593 OID_AUTO, "rx_oversize_packets", 1594 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 1595 "rx_oversize_packets"); 1596 1597 SYSCTL_ADD_QUAD(ctx, children, 1598 OID_AUTO, "rx_jabbers", 1599 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 1600 "rx_jabbers"); 1601 1602 SYSCTL_ADD_QUAD(ctx, children, 1603 OID_AUTO, "rx_undersize_packets", 1604 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 1605 "rx_undersize_packets"); 1606 1607 SYSCTL_ADD_QUAD(ctx, children, 1608 OID_AUTO, "rx_fragments", 1609 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 1610 "rx_fragments"); 1611 1612 SYSCTL_ADD_QUAD(ctx, children, 1613 OID_AUTO, "tx_64_byte_packets", 1614 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 1615 
"tx_64_byte_packets"); 1616 1617 SYSCTL_ADD_QUAD(ctx, children, 1618 OID_AUTO, "tx_65_to_127_byte_packets", 1619 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 1620 "tx_65_to_127_byte_packets"); 1621 1622 SYSCTL_ADD_QUAD(ctx, children, 1623 OID_AUTO, "tx_128_to_255_byte_packets", 1624 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 1625 "tx_128_to_255_byte_packets"); 1626 1627 SYSCTL_ADD_QUAD(ctx, children, 1628 OID_AUTO, "tx_256_to_511_byte_packets", 1629 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 1630 "tx_256_to_511_byte_packets"); 1631 1632 SYSCTL_ADD_QUAD(ctx, children, 1633 OID_AUTO, "tx_512_to_1023_byte_packets", 1634 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 1635 "tx_512_to_1023_byte_packets"); 1636 1637 SYSCTL_ADD_QUAD(ctx, children, 1638 OID_AUTO, "tx_1024_to_1518_byte_packets", 1639 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 1640 "tx_1024_to_1518_byte_packets"); 1641 1642 SYSCTL_ADD_QUAD(ctx, children, 1643 OID_AUTO, "tx_1519_to_2047_byte_packets", 1644 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 1645 "tx_1519_to_2047_byte_packets"); 1646 1647 SYSCTL_ADD_QUAD(ctx, children, 1648 OID_AUTO, "tx_2048_to_4095_byte_packets", 1649 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 1650 "tx_2048_to_4095_byte_packets"); 1651 1652 SYSCTL_ADD_QUAD(ctx, children, 1653 OID_AUTO, "tx_4096_to_9216_byte_packets", 1654 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 1655 "tx_4096_to_9216_byte_packets"); 1656 1657 SYSCTL_ADD_QUAD(ctx, children, 1658 OID_AUTO, "tx_9217_to_16383_byte_packets", 1659 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 1660 "tx_9217_to_16383_byte_packets"); 1661 1662 SYSCTL_ADD_QUAD(ctx, children, 1663 OID_AUTO, "tx_pause_frames", 1664 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 1665 "tx_pause_frames"); 1666 1667 SYSCTL_ADD_QUAD(ctx, children, 1668 OID_AUTO, "tx_pfc_frames", 1669 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 1670 "tx_pfc_frames"); 1671 1672 SYSCTL_ADD_QUAD(ctx, children, 1673 OID_AUTO, "tx_lpi_entry_count", 1674 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 1675 "tx_lpi_entry_count"); 1676 1677 SYSCTL_ADD_QUAD(ctx, children, 1678 OID_AUTO, "tx_total_collisions", 1679 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 1680 "tx_total_collisions"); 1681 1682 SYSCTL_ADD_QUAD(ctx, children, 1683 OID_AUTO, "brb_truncates", 1684 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 1685 "brb_truncates"); 1686 1687 SYSCTL_ADD_QUAD(ctx, children, 1688 OID_AUTO, "brb_discards", 1689 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 1690 "brb_discards"); 1691 1692 SYSCTL_ADD_QUAD(ctx, children, 1693 OID_AUTO, "rx_mac_bytes", 1694 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 1695 "rx_mac_bytes"); 1696 1697 SYSCTL_ADD_QUAD(ctx, children, 1698 OID_AUTO, "rx_mac_uc_packets", 1699 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 1700 "rx_mac_uc_packets"); 1701 1702 SYSCTL_ADD_QUAD(ctx, children, 1703 OID_AUTO, "rx_mac_mc_packets", 1704 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 1705 "rx_mac_mc_packets"); 1706 1707 SYSCTL_ADD_QUAD(ctx, children, 1708 OID_AUTO, "rx_mac_bc_packets", 1709 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 1710 "rx_mac_bc_packets"); 1711 1712 SYSCTL_ADD_QUAD(ctx, children, 1713 OID_AUTO, "rx_mac_frames_ok", 1714 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 1715 "rx_mac_frames_ok"); 1716 1717 SYSCTL_ADD_QUAD(ctx, children, 1718 OID_AUTO, "tx_mac_bytes", 1719 CTLFLAG_RD, 
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bytes,
        "tx_mac_bytes");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_uc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets,
        "tx_mac_uc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_mc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets,
        "tx_mac_mc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_bc_packets",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets,
        "tx_mac_bc_packets");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "tx_mac_ctrl_frames",
        CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames,
        "tx_mac_ctrl_frames");
    return;
}

static void
qlnx_add_sysctls(qlnx_host_t *ha)
{
    device_t dev = ha->pci_dev;
    struct sysctl_ctx_list *ctx;
    struct sysctl_oid_list *children;

    ctx = device_get_sysctl_ctx(dev);
    children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    qlnx_add_fp_stats_sysctls(ha);
    qlnx_add_sp_stats_sysctls(ha);
    qlnx_add_hw_stats_sysctls(ha);

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version",
        CTLFLAG_RD, qlnx_ver_str, 0,
        "Driver Version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version",
        CTLFLAG_RD, ha->stormfw_ver, 0,
        "STORM Firmware Version");

    SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version",
        CTLFLAG_RD, ha->mfw_ver, 0,
        "Management Firmware Version");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "personality", CTLFLAG_RD,
        &ha->personality, ha->personality,
        "\tpersonality = 0 => Ethernet Only\n"
        "\tpersonality = 3 => Ethernet and RoCE\n"
        "\tpersonality = 4 => Ethernet and iWARP\n"
        "\tpersonality = 6 => Default in Shared Memory\n");

    ha->dbg_level = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "debug", CTLFLAG_RW,
        &ha->dbg_level, ha->dbg_level, "Debug Level");

    ha->dp_level = 0x01;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dp_level", CTLFLAG_RW,
        &ha->dp_level, ha->dp_level, "DP Level");

    ha->dbg_trace_lro_cnt = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW,
        &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt,
        "Trace LRO Counts");

    ha->dbg_trace_tso_pkt_len = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW,
        &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len,
        "Trace TSO packet lengths");

    ha->dp_module = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "dp_module", CTLFLAG_RW,
        &ha->dp_module, ha->dp_module, "DP Module");

    ha->err_inject = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "err_inject", CTLFLAG_RW,
        &ha->err_inject, ha->err_inject, "Error Inject");

    ha->storm_stats_enable = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "storm_stats_enable", CTLFLAG_RW,
        &ha->storm_stats_enable, ha->storm_stats_enable,
        "Enable Storm Statistics Gathering");

    ha->storm_stats_index = 0;

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "storm_stats_index", CTLFLAG_RD,
        &ha->storm_stats_index, ha->storm_stats_index,
        "Enable Storm Statistics Gathering Current Index");

    ha->grcdump_taken = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "grcdump_taken", CTLFLAG_RD,
        &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken");

    ha->idle_chk_taken = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "idle_chk_taken", CTLFLAG_RD,
        &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD,
        &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs,
        "rx_coalesce_usecs");

    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD,
        &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs,
        "tx_coalesce_usecs");

    ha->rx_pkt_threshold = 128;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW,
        &ha->rx_pkt_threshold, ha->rx_pkt_threshold,
        "No. of Rx Pkts to process at a time");

    ha->rx_jumbo_buf_eq_mtu = 0;
    SYSCTL_ADD_UINT(ctx, children,
        OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW,
        &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu,
        "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n"
        "otherwise Rx Jumbo buffers are set to >= MTU size\n");

    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qlnx_trigger_dump_sysctl, "I", "trigger_dump");

    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qlnx_set_rx_coalesce, "I",
        "rx interrupt coalesce period microseconds");

    SYSCTL_ADD_PROC(ctx, children,
        OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW,
        (void *)ha, 0,
        qlnx_set_tx_coalesce, "I",
        "tx interrupt coalesce period microseconds");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "err_illegal_intr", CTLFLAG_RD,
        &ha->err_illegal_intr, "err_illegal_intr");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "err_fp_null", CTLFLAG_RD,
        &ha->err_fp_null, "err_fp_null");

    SYSCTL_ADD_QUAD(ctx, children,
        OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD,
        &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type");
    return;
}

/*****************************************************************************
 * Operating System Network Interface Functions
 *****************************************************************************/

static void
qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
{
    uint16_t device_id;
    struct ifnet *ifp;

    ifp = ha->ifp = if_alloc(IFT_ETHER);

    if (ifp == NULL)
        panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    device_id = pci_get_device(ha->pci_dev);

#if __FreeBSD_version >= 1000000

    if (device_id == QLOGIC_PCI_DEVICE_ID_1634)
        ifp->if_baudrate = IF_Gbps(40);
    else if (device_id == QLOGIC_PCI_DEVICE_ID_1656)
        ifp->if_baudrate = IF_Gbps(25);
    else if (device_id == QLOGIC_PCI_DEVICE_ID_1654)
        ifp->if_baudrate = IF_Gbps(50);
    else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
        ifp->if_baudrate = IF_Gbps(100);

    ifp->if_capabilities = IFCAP_LINKSTATE;
#else
    ifp->if_mtu = ETHERMTU;
    ifp->if_baudrate = (1 * 1000 * 1000 * 1000);

#endif /* #if __FreeBSD_version >= 1000000 */

    ifp->if_init = qlnx_init;
    ifp->if_softc = ha;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = qlnx_ioctl;
    ifp->if_transmit = qlnx_transmit;
    ifp->if_qflush = qlnx_qflush;

    IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha));
    ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha);
    IFQ_SET_READY(&ifp->if_snd);

#if __FreeBSD_version >= 1100036
    if_setgetcounterfn(ifp, qlnx_get_counter);
#endif

    ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN);
    ether_ifattach(ifp, ha->primary_mac);
    bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);

    ifp->if_capabilities = IFCAP_HWCSUM;
    ifp->if_capabilities |= IFCAP_JUMBO_MTU;

    ifp->if_capabilities |= IFCAP_VLAN_MTU;
    ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
    ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
    ifp->if_capabilities |= IFCAP_VLAN_HWCSUM;
    ifp->if_capabilities |= IFCAP_VLAN_HWTSO;
    ifp->if_capabilities |= IFCAP_TSO4;
    ifp->if_capabilities |= IFCAP_TSO6;
    ifp->if_capabilities |= IFCAP_LRO;

    ifp->if_capenable = ifp->if_capabilities;

    ifp->if_hwassist = CSUM_IP;
    ifp->if_hwassist |= CSUM_TCP | CSUM_UDP;
    ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6;
    ifp->if_hwassist |= CSUM_TSO;

    ifp->if_hdrlen = sizeof(struct ether_vlan_header);

    ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,
        qlnx_media_status);

    if (device_id == QLOGIC_PCI_DEVICE_ID_1634) {
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
    } else if (device_id == QLOGIC_PCI_DEVICE_ID_1656) {
        ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
    } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
        ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
    } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
        ifmedia_add(&ha->media,
            (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
        ifmedia_add(&ha->media,
            (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
        ifmedia_add(&ha->media,
            (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
    }

    ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
    ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL);

    ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));

    QL_DPRINT2(ha, "exit\n");

    return;
}

static void
qlnx_init_locked(qlnx_host_t *ha)
{
    struct ifnet *ifp = ha->ifp;

    QL_DPRINT1(ha, "Driver Initialization start \n");

    qlnx_stop(ha);

    if (qlnx_load(ha) == 0) {
        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
    }

    return;
}

static void
qlnx_init(void *arg)
{
    qlnx_host_t *ha;

    ha = (qlnx_host_t *)arg;

    QL_DPRINT2(ha, "enter\n");

    QLNX_LOCK(ha);
    qlnx_init_locked(ha);
    QLNX_UNLOCK(ha);

    QL_DPRINT2(ha, "exit\n");

    return;
}

static int
qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac)
{
    struct ecore_filter_mcast *mcast;
    struct ecore_dev *cdev;
    int rc;

    cdev = &ha->cdev;

    mcast = &ha->ecore_mcast;
    bzero(mcast, sizeof(struct ecore_filter_mcast));

    if (add_mac)
        mcast->opcode = ECORE_FILTER_ADD;
    else
2053 mcast->opcode = ECORE_FILTER_REMOVE; 2054 2055 mcast->num_mc_addrs = 1; 2056 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2057 2058 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2059 2060 return (rc); 2061 } 2062 2063 static int
2064 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2065 { 2066 int i; 2067 2068 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2069 2070 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2071 return 0; /* it has already been added */ 2072 } 2073 2074 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2075 2076 if ((ha->mcast[i].addr[0] == 0) && 2077 (ha->mcast[i].addr[1] == 0) && 2078 (ha->mcast[i].addr[2] == 0) && 2079 (ha->mcast[i].addr[3] == 0) && 2080 (ha->mcast[i].addr[4] == 0) && 2081 (ha->mcast[i].addr[5] == 0)) { 2082 2083 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2084 return (-1); 2085 2086 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2087 ha->nmcast++; 2088 2089 return 0; 2090 } 2091 } 2092 return 0; 2093 } 2094 2095 static int
2096 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2097 { 2098 int i; 2099 2100 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2101 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2102 2103 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2104 return (-1); 2105 2106 ha->mcast[i].addr[0] = 0; 2107 ha->mcast[i].addr[1] = 0; 2108 ha->mcast[i].addr[2] = 0; 2109 ha->mcast[i].addr[3] = 0; 2110 ha->mcast[i].addr[4] = 0; 2111 ha->mcast[i].addr[5] = 0; 2112 2113 ha->nmcast--; 2114 2115 return 0; 2116 } 2117 } 2118 return 0; 2119 } 2120
2121 /* 2122 * Name: qlnx_hw_set_multi 2123 * Function: Sets the multicast addresses provided by the host O.S. into the 2124 * hardware (for the given interface) 2125 */ 2126 static void
2127 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2128 uint32_t add_mac) 2129 { 2130 int i; 2131 2132 for (i = 0; i < mcnt; i++) { 2133 if (add_mac) { 2134 if (qlnx_hw_add_mcast(ha, mta)) 2135 break; 2136 } else { 2137 if (qlnx_hw_del_mcast(ha, mta)) 2138 break; 2139 } 2140 2141 mta += ETHER_HDR_LEN; 2142 } 2143 return; 2144 } 2145 2146
2147 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN) 2148 static int
2149 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2150 { 2151 uint8_t mta[QLNX_MCAST_ADDRS_SIZE]; 2152 struct ifmultiaddr *ifma; 2153 int mcnt = 0; 2154 struct ifnet *ifp = ha->ifp; 2155 int ret = 0; 2156 2157 if_maddr_rlock(ifp); 2158 2159 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2160 2161 if (ifma->ifma_addr->sa_family != AF_LINK) 2162 continue; 2163 2164 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2165 break; 2166 2167 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2168 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2169 2170 mcnt++; 2171 } 2172 2173 if_maddr_runlock(ifp); 2174 2175 QLNX_LOCK(ha); 2176 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2177 QLNX_UNLOCK(ha); 2178 2179 return (ret); 2180 } 2181
2182 static int 2183 qlnx_set_promisc(qlnx_host_t *ha) 2184 { 2185 int rc = 0; 2186 uint8_t filter; 2187 2188 filter = ha->filter; 2189 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2190 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2191 2192 rc = qlnx_set_rx_accept_filter(ha, filter); 2193 return (rc); 2194 } 2195
2196 static int 2197 qlnx_set_allmulti(qlnx_host_t *ha) 2198 { 2199 int rc = 0; 2200 uint8_t filter; 2201 2202 filter = ha->filter; 2203 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2204 rc = qlnx_set_rx_accept_filter(ha, filter); 2205 2206 return (rc); 2207 } 2208 2209
2210 static int 2211 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t
data) 2212 { 2213 int ret = 0, mask; 2214 struct ifreq *ifr = (struct ifreq *)data; 2215 struct ifaddr *ifa = (struct ifaddr *)data; 2216 qlnx_host_t *ha; 2217 2218 ha = (qlnx_host_t *)ifp->if_softc; 2219 2220 switch (cmd) { 2221 case SIOCSIFADDR: 2222 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2223 2224 if (ifa->ifa_addr->sa_family == AF_INET) { 2225 ifp->if_flags |= IFF_UP; 2226 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2227 QLNX_LOCK(ha); 2228 qlnx_init_locked(ha); 2229 QLNX_UNLOCK(ha); 2230 } 2231 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2232 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2233 2234 arp_ifinit(ifp, ifa); 2235 } else { 2236 ether_ioctl(ifp, cmd, data); 2237 } 2238 break; 2239 2240 case SIOCSIFMTU: 2241 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2242 2243 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2244 ret = EINVAL; 2245 } else { 2246 QLNX_LOCK(ha); 2247 ifp->if_mtu = ifr->ifr_mtu; 2248 ha->max_frame_size = 2249 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2250 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2251 qlnx_init_locked(ha); 2252 } 2253 2254 QLNX_UNLOCK(ha); 2255 } 2256 2257 break; 2258 2259 case SIOCSIFFLAGS: 2260 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2261 2262 QLNX_LOCK(ha); 2263 2264 if (ifp->if_flags & IFF_UP) { 2265 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2266 if ((ifp->if_flags ^ ha->if_flags) & 2267 IFF_PROMISC) { 2268 ret = qlnx_set_promisc(ha); 2269 } else if ((ifp->if_flags ^ ha->if_flags) & 2270 IFF_ALLMULTI) { 2271 ret = qlnx_set_allmulti(ha); 2272 } 2273 } else { 2274 ha->max_frame_size = ifp->if_mtu + 2275 ETHER_HDR_LEN + ETHER_CRC_LEN; 2276 qlnx_init_locked(ha); 2277 } 2278 } else { 2279 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2280 qlnx_stop(ha); 2281 ha->if_flags = ifp->if_flags; 2282 } 2283 2284 QLNX_UNLOCK(ha); 2285 break; 2286 2287 case SIOCADDMULTI: 2288 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2289 2290 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2291 if (qlnx_set_multi(ha, 1)) 2292 ret = EINVAL; 2293 } 2294 break; 2295 2296 case SIOCDELMULTI: 2297 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2298 2299 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2300 if (qlnx_set_multi(ha, 0)) 2301 ret = EINVAL; 2302 } 2303 break; 2304 2305 case SIOCSIFMEDIA: 2306 case SIOCGIFMEDIA: 2307 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2308 2309 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2310 break; 2311 2312 case SIOCSIFCAP: 2313 2314 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2315 2316 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2317 2318 if (mask & IFCAP_HWCSUM) 2319 ifp->if_capenable ^= IFCAP_HWCSUM; 2320 if (mask & IFCAP_TSO4) 2321 ifp->if_capenable ^= IFCAP_TSO4; 2322 if (mask & IFCAP_TSO6) 2323 ifp->if_capenable ^= IFCAP_TSO6; 2324 if (mask & IFCAP_VLAN_HWTAGGING) 2325 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2326 if (mask & IFCAP_VLAN_HWTSO) 2327 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2328 if (mask & IFCAP_LRO) 2329 ifp->if_capenable ^= IFCAP_LRO; 2330 2331 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 2332 qlnx_init(ha); 2333 2334 VLAN_CAPABILITIES(ifp); 2335 break; 2336 2337 #if (__FreeBSD_version >= 1100101) 2338 2339 case SIOCGI2C: 2340 { 2341 struct ifi2creq i2c; 2342 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2343 struct ecore_ptt *p_ptt; 2344 2345 ret = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2346 2347 if (ret) 2348 break; 2349 2350 if ((i2c.len > sizeof (i2c.data)) || 2351 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2352 ret = EINVAL; 2353 break; 2354 } 2355 2356 p_ptt = 
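/*
 * SIOCGI2C reads the SFP/QSFP module EEPROM through the management FW;
 * dev_addr 0xA0/0xA2 are the standard module memory and diagnostics
 * pages, and a PTT register window must be acquired before the
 * ecore_mcp_phy_sfp_read() request below can be issued.
 */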
ecore_ptt_acquire(p_hwfn); 2357 2358 if (!p_ptt) { 2359 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2360 ret = -1; 2361 break; 2362 } 2363 2364 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2365 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2366 i2c.len, &i2c.data[0]); 2367 2368 ecore_ptt_release(p_hwfn, p_ptt); 2369 2370 if (ret) { 2371 ret = -1; 2372 break; 2373 } 2374 2375 ret = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2376 2377 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2378 len = %d addr = 0x%02x offset = 0x%04x \ 2379 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2380 0x%02x 0x%02x 0x%02x\n", 2381 ret, i2c.len, i2c.dev_addr, i2c.offset, 2382 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2383 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2384 break; 2385 } 2386 #endif /* #if (__FreeBSD_version >= 1100101) */ 2387 2388 default: 2389 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2390 ret = ether_ioctl(ifp, cmd, data); 2391 break; 2392 } 2393 2394 return (ret); 2395 } 2396 2397 static int 2398 qlnx_media_change(struct ifnet *ifp) 2399 { 2400 qlnx_host_t *ha; 2401 struct ifmedia *ifm; 2402 int ret = 0; 2403 2404 ha = (qlnx_host_t *)ifp->if_softc; 2405 2406 QL_DPRINT2(ha, "enter\n"); 2407 2408 ifm = &ha->media; 2409 2410 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2411 ret = EINVAL; 2412 2413 QL_DPRINT2(ha, "exit\n"); 2414 2415 return (ret); 2416 } 2417 2418 static void 2419 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2420 { 2421 qlnx_host_t *ha; 2422 2423 ha = (qlnx_host_t *)ifp->if_softc; 2424 2425 QL_DPRINT2(ha, "enter\n"); 2426 2427 ifmr->ifm_status = IFM_AVALID; 2428 ifmr->ifm_active = IFM_ETHER; 2429 2430 if (ha->link_up) { 2431 ifmr->ifm_status |= IFM_ACTIVE; 2432 ifmr->ifm_active |= 2433 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2434 2435 if (ha->if_link.link_partner_caps & 2436 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2437 ifmr->ifm_active |= 2438 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2439 } 2440 2441 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2442 2443 return; 2444 } 2445 2446 2447 static void 2448 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2449 struct qlnx_tx_queue *txq) 2450 { 2451 u16 idx; 2452 struct mbuf *mp; 2453 bus_dmamap_t map; 2454 int i; 2455 struct eth_tx_bd *tx_data_bd; 2456 struct eth_tx_1st_bd *first_bd; 2457 int nbds = 0; 2458 2459 idx = txq->sw_tx_cons; 2460 mp = txq->sw_tx_ring[idx].mp; 2461 map = txq->sw_tx_ring[idx].map; 2462 2463 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2464 2465 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2466 2467 QL_DPRINT1(ha, "(mp == NULL) " 2468 " tx_idx = 0x%x" 2469 " ecore_prod_idx = 0x%x" 2470 " ecore_cons_idx = 0x%x" 2471 " hw_bd_cons = 0x%x" 2472 " txq_db_last = 0x%x" 2473 " elem_left = 0x%x\n", 2474 fp->rss_id, 2475 ecore_chain_get_prod_idx(&txq->tx_pbl), 2476 ecore_chain_get_cons_idx(&txq->tx_pbl), 2477 le16toh(*txq->hw_cons_ptr), 2478 txq->tx_db.raw, 2479 ecore_chain_get_elem_left(&txq->tx_pbl)); 2480 2481 fp->err_tx_free_pkt_null++; 2482 2483 //DEBUG 2484 qlnx_trigger_dump(ha); 2485 2486 return; 2487 } else { 2488 2489 QLNX_INC_OPACKETS((ha->ifp)); 2490 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2491 2492 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2493 bus_dmamap_unload(ha->tx_tag, map); 2494 2495 fp->tx_pkts_freed++; 2496 fp->tx_pkts_completed++; 2497 2498 m_freem(mp); 2499 } 2500 2501 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2502 nbds = first_bd->data.nbds; 2503 2504 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2505 2506 for (i = 1; i < nbds; i++) { 2507 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 2508 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2509 } 2510 txq->sw_tx_ring[idx].flags = 0; 2511 txq->sw_tx_ring[idx].mp = NULL; 2512 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2513 2514 return; 2515 } 2516 2517 static void 2518 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2519 struct qlnx_tx_queue *txq) 2520 { 2521 u16 hw_bd_cons; 2522 u16 ecore_cons_idx; 2523 uint16_t diff; 2524 2525 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2526 2527 while (hw_bd_cons != 2528 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2529 2530 if (hw_bd_cons < ecore_cons_idx) { 2531 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2532 } else { 2533 diff = hw_bd_cons - ecore_cons_idx; 2534 } 2535 if ((diff > TX_RING_SIZE) || 2536 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2537 2538 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2539 2540 QL_DPRINT1(ha, "(diff = 0x%x) " 2541 " tx_idx = 0x%x" 2542 " ecore_prod_idx = 0x%x" 2543 " ecore_cons_idx = 0x%x" 2544 " hw_bd_cons = 0x%x" 2545 " txq_db_last = 0x%x" 2546 " elem_left = 0x%x\n", 2547 diff, 2548 fp->rss_id, 2549 ecore_chain_get_prod_idx(&txq->tx_pbl), 2550 ecore_chain_get_cons_idx(&txq->tx_pbl), 2551 le16toh(*txq->hw_cons_ptr), 2552 txq->tx_db.raw, 2553 ecore_chain_get_elem_left(&txq->tx_pbl)); 2554 2555 fp->err_tx_cons_idx_conflict++; 2556 2557 //DEBUG 2558 qlnx_trigger_dump(ha); 2559 } 2560 2561 qlnx_free_tx_pkt(ha, fp, txq); 2562 2563 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2564 } 2565 return; 2566 } 2567 2568 static int 2569 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 2570 { 2571 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 2572 struct qlnx_fastpath *fp; 2573 int rss_id = 0, ret = 0; 2574 2575 QL_DPRINT2(ha, "enter\n"); 2576 2577 #if __FreeBSD_version >= 1100000 2578 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 2579 #else 2580 if (mp->m_flags & M_FLOWID) 2581 
#endif 2582 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 2583 ha->num_rss; 2584 2585 fp = &ha->fp_array[rss_id]; 2586 2587 if (fp->tx_br == NULL) { 2588 ret = EINVAL; 2589 goto qlnx_transmit_exit; 2590 } 2591 2592 if (mp != NULL) { 2593 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2594 } 2595 2596 if (fp->fp_taskqueue != NULL) 2597 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 2598 2599 ret = 0; 2600 2601 qlnx_transmit_exit: 2602 2603 QL_DPRINT2(ha, "exit ret = %d\n", ret); 2604 return ret; 2605 } 2606 2607 static void 2608 qlnx_qflush(struct ifnet *ifp) 2609 { 2610 int rss_id; 2611 struct qlnx_fastpath *fp; 2612 struct mbuf *mp; 2613 qlnx_host_t *ha; 2614 2615 ha = (qlnx_host_t *)ifp->if_softc; 2616 2617 QL_DPRINT2(ha, "enter\n"); 2618 2619 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 2620 2621 fp = &ha->fp_array[rss_id]; 2622 2623 if (fp == NULL) 2624 continue; 2625 2626 if (fp->tx_br) { 2627 mtx_lock(&fp->tx_mtx); 2628 2629 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 2630 fp->tx_pkts_freed++; 2631 m_freem(mp); 2632 } 2633 mtx_unlock(&fp->tx_mtx); 2634 } 2635 } 2636 QL_DPRINT2(ha, "exit\n"); 2637 2638 return; 2639 } 2640 2641 static void 2642 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 2643 { 2644 struct ecore_dev *cdev; 2645 uint32_t offset; 2646 2647 cdev = &ha->cdev; 2648 2649 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells); 2650 2651 bus_write_4(ha->pci_dbells, offset, value); 2652 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 2653 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 2654 2655 return; 2656 } 2657 2658 static uint32_t 2659 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 2660 { 2661 struct ether_vlan_header *eh = NULL; 2662 struct ip *ip = NULL; 2663 struct ip6_hdr *ip6 = NULL; 2664 struct tcphdr *th = NULL; 2665 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 2666 uint16_t etype = 0; 2667 device_t dev; 2668 uint8_t buf[sizeof(struct ip6_hdr)]; 2669 2670 dev = ha->pci_dev; 2671 2672 eh = mtod(mp, struct ether_vlan_header *); 2673 2674 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2675 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2676 etype = ntohs(eh->evl_proto); 2677 } else { 2678 ehdrlen = ETHER_HDR_LEN; 2679 etype = ntohs(eh->evl_encap_proto); 2680 } 2681 2682 switch (etype) { 2683 2684 case ETHERTYPE_IP: 2685 ip = (struct ip *)(mp->m_data + ehdrlen); 2686 2687 ip_hlen = sizeof (struct ip); 2688 2689 if (mp->m_len < (ehdrlen + ip_hlen)) { 2690 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 2691 ip = (struct ip *)buf; 2692 } 2693 2694 th = (struct tcphdr *)(ip + 1); 2695 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2696 break; 2697 2698 case ETHERTYPE_IPV6: 2699 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2700 2701 ip_hlen = sizeof(struct ip6_hdr); 2702 2703 if (mp->m_len < (ehdrlen + ip_hlen)) { 2704 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 2705 buf); 2706 ip6 = (struct ip6_hdr *)buf; 2707 } 2708 th = (struct tcphdr *)(ip6 + 1); 2709 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2710 break; 2711 2712 default: 2713 break; 2714 } 2715 2716 return (offset); 2717 } 2718 2719 static __inline int 2720 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 2721 uint32_t offset) 2722 { 2723 int i; 2724 uint32_t sum, nbds_in_hdr = 1; 2725 bus_dma_segment_t *t_segs = segs; 2726 2727 /* count the number of segments spanned by TCP header */ 2728 2729 i = 0; 2730 while ((i < nsegs) && (offset > t_segs->ds_len)) { 2731 nbds_in_hdr++; 
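/*
 * Each DMA segment wholly consumed by the protocol headers adds one BD
 * to nbds_in_hdr.  The window check that follows then walks the
 * payload in groups of (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr) BDs
 * and requires every group to carry at least ETH_TX_LSO_WINDOW_MIN_LEN
 * bytes; if a group falls short, the caller falls back to m_defrag()
 * and retries the mapping.
 */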
2732 offset = offset - t_segs->ds_len; 2733 t_segs++; 2734 i++; 2735 } 2736 2737 while (nsegs >= QLNX_MAX_SEGMENTS_NON_TSO) { 2738 2739 sum = 0; 2740 2741 for (i = 0; i < (ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr); i++){ 2742 sum += segs->ds_len; 2743 segs++; 2744 } 2745 2746 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 2747 fp->tx_lso_wnd_min_len++; 2748 return (-1); 2749 } 2750 2751 nsegs -= QLNX_MAX_SEGMENTS_NON_TSO; 2752 } 2753 2754 return (0); 2755 } 2756
2757 static int 2758 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 2759 { 2760 bus_dma_segment_t *segs; 2761 bus_dmamap_t map = 0; 2762 uint32_t nsegs = 0; 2763 int ret = -1; 2764 struct mbuf *m_head = *m_headp; 2765 uint16_t idx = 0; 2766 uint16_t elem_left; 2767 2768 uint8_t nbd = 0; 2769 struct qlnx_tx_queue *txq; 2770 2771 struct eth_tx_1st_bd *first_bd; 2772 struct eth_tx_2nd_bd *second_bd; 2773 struct eth_tx_3rd_bd *third_bd; 2774 struct eth_tx_bd *tx_data_bd; 2775 2776 int seg_idx = 0; 2777 uint32_t nbds_in_hdr = 0; 2778 uint32_t offset = 0; 2779
2780 QL_DPRINT8(ha, "enter\n"); 2781 2782 if (!ha->link_up) 2783 return (-1); 2784 2785 first_bd = NULL; 2786 second_bd = NULL; 2787 third_bd = NULL; 2788 tx_data_bd = NULL; 2789 2790 txq = fp->txq[0]; 2791 2792 if (fp->tx_ring_full) { 2793 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 2794 2795 if (elem_left < (TX_RING_SIZE >> 4)) 2796 return (-1); 2797 else 2798 fp->tx_ring_full = 0; 2799 } 2800 2801 idx = txq->sw_tx_prod; 2802 2803 map = txq->sw_tx_ring[idx].map; 2804 segs = txq->segs; 2805
2806 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 2807 BUS_DMA_NOWAIT); 2808 2809 if (ha->dbg_trace_tso_pkt_len) { 2810 if (!fp->tx_tso_min_pkt_len) { 2811 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2812 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len; 2813 } else { 2814 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 2815 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 2816 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 2817 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len; 2818 } 2819 } 2820
2821 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 2822 offset = qlnx_tcp_offset(ha, m_head); 2823 2824 if ((ret == EFBIG) || 2825 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 2826 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 2827 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 2828 qlnx_tso_check(fp, segs, nsegs, offset))))) { 2829 2830 struct mbuf *m; 2831 2832 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 2833 2834 fp->tx_defrag++; 2835 2836 m = m_defrag(m_head, M_NOWAIT); 2837 if (m == NULL) { 2838 fp->err_tx_defrag++; 2839 fp->tx_pkts_freed++; 2840 m_freem(m_head); 2841 *m_headp = NULL; 2842 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 2843 return (ENOBUFS); 2844 } 2845 2846 m_head = m; 2847 *m_headp = m_head; 2848
2849 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 2850 segs, &nsegs, BUS_DMA_NOWAIT))) { 2851 2852 fp->err_tx_defrag_dmamap_load++; 2853 2854 QL_DPRINT1(ha, 2855 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 2856 ret, m_head->m_pkthdr.len); 2857 2858 fp->tx_pkts_freed++; 2859 m_freem(m_head); 2860 *m_headp = NULL; 2861 2862 return (ret); 2863 } 2864
2865 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 2866 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 2867 2868 fp->err_tx_non_tso_max_seg++; 2869 2870 QL_DPRINT1(ha, 2871 "(%d) nsegs too many for non-TSO [%d, %d]\n", 2872 ret, nsegs, m_head->m_pkthdr.len); 2873 2874 fp->tx_pkts_freed++; 2875 m_freem(m_head); 2876 *m_headp = NULL; 2877 2878 return (ret); 2879 } 2880 if
(m_head->m_pkthdr.csum_flags & CSUM_TSO) 2881 offset = qlnx_tcp_offset(ha, m_head); 2882 2883 } else if (ret) { 2884 2885 fp->err_tx_dmamap_load++; 2886 2887 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 2888 ret, m_head->m_pkthdr.len); 2889 fp->tx_pkts_freed++; 2890 m_freem(m_head); 2891 *m_headp = NULL; 2892 return (ret); 2893 } 2894 2895 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 2896 2897 if (ha->dbg_trace_tso_pkt_len) { 2898 if (nsegs < QLNX_FP_MAX_SEGS) 2899 fp->tx_pkts[(nsegs - 1)]++; 2900 else 2901 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 2902 } 2903 2904 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 2905 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 2906 2907 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs" 2908 " in chain[%d] trying to free packets\n", 2909 nsegs, elem_left, fp->rss_id); 2910 2911 fp->tx_nsegs_gt_elem_left++; 2912 2913 (void)qlnx_tx_int(ha, fp, txq); 2914 2915 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 2916 ecore_chain_get_elem_left(&txq->tx_pbl))) { 2917 2918 QL_DPRINT1(ha, 2919 "(%d, 0x%x) insuffient BDs in chain[%d]\n", 2920 nsegs, elem_left, fp->rss_id); 2921 2922 fp->err_tx_nsegs_gt_elem_left++; 2923 fp->tx_ring_full = 1; 2924 ha->storm_stats_enable = 1; 2925 return (ENOBUFS); 2926 } 2927 } 2928 2929 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 2930 2931 txq->sw_tx_ring[idx].mp = m_head; 2932 2933 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 2934 2935 memset(first_bd, 0, sizeof(*first_bd)); 2936 2937 first_bd->data.bd_flags.bitfields = 2938 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 2939 2940 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 2941 2942 nbd++; 2943 2944 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 2945 first_bd->data.bd_flags.bitfields |= 2946 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2947 } 2948 2949 if (m_head->m_pkthdr.csum_flags & (CSUM_UDP | CSUM_TCP)) { 2950 first_bd->data.bd_flags.bitfields |= 2951 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 2952 } 2953 2954 if (m_head->m_flags & M_VLANTAG) { 2955 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 2956 first_bd->data.bd_flags.bitfields |= 2957 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 2958 } 2959 2960 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 2961 2962 first_bd->data.bd_flags.bitfields |= 2963 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 2964 first_bd->data.bd_flags.bitfields |= 2965 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 2966 2967 nbds_in_hdr = 1; 2968 2969 if (offset == segs->ds_len) { 2970 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 2971 segs++; 2972 seg_idx++; 2973 2974 second_bd = (struct eth_tx_2nd_bd *) 2975 ecore_chain_produce(&txq->tx_pbl); 2976 memset(second_bd, 0, sizeof(*second_bd)); 2977 nbd++; 2978 2979 if (seg_idx < nsegs) { 2980 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 2981 (segs->ds_addr), (segs->ds_len)); 2982 segs++; 2983 seg_idx++; 2984 } 2985 2986 third_bd = (struct eth_tx_3rd_bd *) 2987 ecore_chain_produce(&txq->tx_pbl); 2988 memset(third_bd, 0, sizeof(*third_bd)); 2989 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 2990 third_bd->data.bitfields |= 2991 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 2992 nbd++; 2993 2994 if (seg_idx < nsegs) { 2995 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 2996 (segs->ds_addr), (segs->ds_len)); 2997 segs++; 2998 seg_idx++; 2999 } 3000 3001 for (; seg_idx < nsegs; seg_idx++) { 3002 tx_data_bd = (struct eth_tx_bd *) 3003 ecore_chain_produce(&txq->tx_pbl); 3004 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3005 
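/*
 * Remaining payload segments (beyond those covered by the first,
 * second and third BDs) are attached here, one plain data BD per
 * DMA segment.
 */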
BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3006 segs->ds_addr,\ 3007 segs->ds_len); 3008 segs++; 3009 nbd++; 3010 } 3011 3012 } else if (offset < segs->ds_len) { 3013 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3014 3015 second_bd = (struct eth_tx_2nd_bd *) 3016 ecore_chain_produce(&txq->tx_pbl); 3017 memset(second_bd, 0, sizeof(*second_bd)); 3018 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3019 (segs->ds_addr + offset),\ 3020 (segs->ds_len - offset)); 3021 nbd++; 3022 segs++; 3023 3024 third_bd = (struct eth_tx_3rd_bd *) 3025 ecore_chain_produce(&txq->tx_pbl); 3026 memset(third_bd, 0, sizeof(*third_bd)); 3027 3028 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3029 segs->ds_addr,\ 3030 segs->ds_len); 3031 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3032 third_bd->data.bitfields |= 3033 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3034 segs++; 3035 nbd++; 3036 3037 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3038 tx_data_bd = (struct eth_tx_bd *) 3039 ecore_chain_produce(&txq->tx_pbl); 3040 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3041 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3042 segs->ds_addr,\ 3043 segs->ds_len); 3044 segs++; 3045 nbd++; 3046 } 3047 3048 } else { 3049 offset = offset - segs->ds_len; 3050 segs++; 3051 3052 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3053 3054 if (offset) 3055 nbds_in_hdr++; 3056 3057 tx_data_bd = (struct eth_tx_bd *) 3058 ecore_chain_produce(&txq->tx_pbl); 3059 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3060 3061 if (second_bd == NULL) { 3062 second_bd = (struct eth_tx_2nd_bd *) 3063 tx_data_bd; 3064 } else if (third_bd == NULL) { 3065 third_bd = (struct eth_tx_3rd_bd *) 3066 tx_data_bd; 3067 } 3068 3069 if (offset && (offset < segs->ds_len)) { 3070 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3071 segs->ds_addr, offset); 3072 3073 tx_data_bd = (struct eth_tx_bd *) 3074 ecore_chain_produce(&txq->tx_pbl); 3075 3076 memset(tx_data_bd, 0, 3077 sizeof(*tx_data_bd)); 3078 3079 if (second_bd == NULL) { 3080 second_bd = 3081 (struct eth_tx_2nd_bd *)tx_data_bd; 3082 } else if (third_bd == NULL) { 3083 third_bd = 3084 (struct eth_tx_3rd_bd *)tx_data_bd; 3085 } 3086 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3087 (segs->ds_addr + offset), \ 3088 (segs->ds_len - offset)); 3089 nbd++; 3090 offset = 0; 3091 } else { 3092 if (offset) 3093 offset = offset - segs->ds_len; 3094 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3095 segs->ds_addr, segs->ds_len); 3096 } 3097 segs++; 3098 nbd++; 3099 } 3100 3101 if (third_bd == NULL) { 3102 third_bd = (struct eth_tx_3rd_bd *) 3103 ecore_chain_produce(&txq->tx_pbl); 3104 memset(third_bd, 0, sizeof(*third_bd)); 3105 } 3106 3107 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3108 third_bd->data.bitfields |= 3109 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3110 } 3111 } else { 3112 segs++; 3113 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3114 tx_data_bd = (struct eth_tx_bd *) 3115 ecore_chain_produce(&txq->tx_pbl); 3116 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3117 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3118 segs->ds_len); 3119 segs++; 3120 nbd++; 3121 } 3122 first_bd->data.bitfields = 3123 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3124 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3125 first_bd->data.bitfields = 3126 htole16(first_bd->data.bitfields); 3127 } 3128 3129 3130 first_bd->data.nbds = nbd; 3131 3132 if (ha->dbg_trace_tso_pkt_len) { 3133 if (fp->tx_tso_max_nsegs < nsegs) 3134 fp->tx_tso_max_nsegs = nsegs; 3135 3136 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3137 fp->tx_tso_min_nsegs = nsegs; 
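/*
 * Below, the software producer index wraps with a (TX_RING_SIZE - 1)
 * mask, which assumes TX_RING_SIZE is a power of two, and the chain's
 * new producer index is written out through the doorbell space in
 * qlnx_txq_doorbell_wr32() so the NIC starts fetching the posted BDs.
 */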
3138 } 3139 3140 txq->sw_tx_ring[idx].nsegs = nsegs; 3141 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3142 3143 txq->tx_db.data.bd_prod = 3144 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3145 3146 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3147 3148 QL_DPRINT8(ha, "exit\n"); 3149 return (0); 3150 } 3151 3152 static void 3153 qlnx_stop(qlnx_host_t *ha) 3154 { 3155 struct ifnet *ifp = ha->ifp; 3156 device_t dev; 3157 int i; 3158 3159 dev = ha->pci_dev; 3160 3161 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3162 3163 /* 3164 * We simply lock and unlock each fp->tx_mtx to 3165 * propagate the if_drv_flags 3166 * state to each tx thread 3167 */ 3168 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3169 3170 if (ha->state == QLNX_STATE_OPEN) { 3171 for (i = 0; i < ha->num_rss; i++) { 3172 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3173 3174 mtx_lock(&fp->tx_mtx); 3175 mtx_unlock(&fp->tx_mtx); 3176 3177 if (fp->fp_taskqueue != NULL) 3178 taskqueue_enqueue(fp->fp_taskqueue, 3179 &fp->fp_task); 3180 } 3181 } 3182 3183 qlnx_unload(ha); 3184 3185 return; 3186 } 3187 3188 static int 3189 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3190 { 3191 return(TX_RING_SIZE - 1); 3192 } 3193 3194 uint8_t * 3195 qlnx_get_mac_addr(qlnx_host_t *ha) 3196 { 3197 struct ecore_hwfn *p_hwfn; 3198 3199 p_hwfn = &ha->cdev.hwfns[0]; 3200 return (p_hwfn->hw_info.hw_mac_addr); 3201 } 3202 3203 static uint32_t 3204 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3205 { 3206 uint32_t ifm_type = 0; 3207 3208 switch (if_link->media_type) { 3209 3210 case MEDIA_MODULE_FIBER: 3211 case MEDIA_UNSPECIFIED: 3212 if (if_link->speed == (100 * 1000)) 3213 ifm_type = QLNX_IFM_100G_SR4; 3214 else if (if_link->speed == (40 * 1000)) 3215 ifm_type = IFM_40G_SR4; 3216 else if (if_link->speed == (25 * 1000)) 3217 ifm_type = QLNX_IFM_25G_SR; 3218 break; 3219 3220 case MEDIA_DA_TWINAX: 3221 if (if_link->speed == (100 * 1000)) 3222 ifm_type = QLNX_IFM_100G_CR4; 3223 else if (if_link->speed == (40 * 1000)) 3224 ifm_type = IFM_40G_CR4; 3225 else if (if_link->speed == (25 * 1000)) 3226 ifm_type = QLNX_IFM_25G_CR; 3227 break; 3228 3229 default : 3230 ifm_type = IFM_UNKNOWN; 3231 break; 3232 } 3233 return (ifm_type); 3234 } 3235 3236 3237 3238 /***************************************************************************** 3239 * Interrupt Service Functions 3240 *****************************************************************************/ 3241 3242 static int 3243 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3244 struct mbuf *mp_head, uint16_t len) 3245 { 3246 struct mbuf *mp, *mpf, *mpl; 3247 struct sw_rx_data *sw_rx_data; 3248 struct qlnx_rx_queue *rxq; 3249 uint16_t len_in_buffer; 3250 3251 rxq = fp->rxq; 3252 mpf = mpl = mp = NULL; 3253 3254 while (len) { 3255 3256 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3257 3258 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3259 mp = sw_rx_data->data; 3260 3261 if (mp == NULL) { 3262 QL_DPRINT1(ha, "mp = NULL\n"); 3263 fp->err_rx_mp_null++; 3264 rxq->sw_rx_cons = 3265 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3266 3267 if (mpf != NULL) 3268 m_freem(mpf); 3269 3270 return (-1); 3271 } 3272 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3273 BUS_DMASYNC_POSTREAD); 3274 3275 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3276 3277 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3278 " incoming packet and reusing its buffer\n"); 3279 3280 qlnx_reuse_rx_data(rxq); 3281 fp->err_rx_alloc_errors++; 3282 
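/*
 * Replacement buffer allocation failed: the consumed buffer was
 * already put back on the ring by qlnx_reuse_rx_data(), so all that is
 * left is to free whatever part of the jumbo chain was assembled and
 * let the caller drop the frame.
 */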
3283 if (mpf != NULL) 3284 m_freem(mpf); 3285 3286 return (-1); 3287 } 3288 ecore_chain_consume(&rxq->rx_bd_ring); 3289 3290 if (len > rxq->rx_buf_size) 3291 len_in_buffer = rxq->rx_buf_size; 3292 else 3293 len_in_buffer = len; 3294 3295 len = len - len_in_buffer; 3296 3297 mp->m_flags &= ~M_PKTHDR; 3298 mp->m_next = NULL; 3299 mp->m_len = len_in_buffer; 3300 3301 if (mpf == NULL) 3302 mpf = mpl = mp; 3303 else { 3304 mpl->m_next = mp; 3305 mpl = mp; 3306 } 3307 } 3308 3309 if (mpf != NULL) 3310 mp_head->m_next = mpf; 3311 3312 return (0); 3313 } 3314 3315 static void 3316 qlnx_tpa_start(qlnx_host_t *ha, 3317 struct qlnx_fastpath *fp, 3318 struct qlnx_rx_queue *rxq, 3319 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3320 { 3321 uint32_t agg_index; 3322 struct ifnet *ifp = ha->ifp; 3323 struct mbuf *mp; 3324 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3325 struct sw_rx_data *sw_rx_data; 3326 dma_addr_t addr; 3327 bus_dmamap_t map; 3328 struct eth_rx_bd *rx_bd; 3329 int i; 3330 device_t dev; 3331 #if __FreeBSD_version >= 1100000 3332 uint8_t hash_type; 3333 #endif /* #if __FreeBSD_version >= 1100000 */ 3334 3335 dev = ha->pci_dev; 3336 agg_index = cqe->tpa_agg_index; 3337 3338 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3339 \t type = 0x%x\n \ 3340 \t bitfields = 0x%x\n \ 3341 \t seg_len = 0x%x\n \ 3342 \t pars_flags = 0x%x\n \ 3343 \t vlan_tag = 0x%x\n \ 3344 \t rss_hash = 0x%x\n \ 3345 \t len_on_first_bd = 0x%x\n \ 3346 \t placement_offset = 0x%x\n \ 3347 \t tpa_agg_index = 0x%x\n \ 3348 \t header_len = 0x%x\n \ 3349 \t ext_bd_len_list[0] = 0x%x\n \ 3350 \t ext_bd_len_list[1] = 0x%x\n \ 3351 \t ext_bd_len_list[2] = 0x%x\n \ 3352 \t ext_bd_len_list[3] = 0x%x\n \ 3353 \t ext_bd_len_list[4] = 0x%x\n", 3354 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3355 cqe->pars_flags.flags, cqe->vlan_tag, 3356 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3357 cqe->tpa_agg_index, cqe->header_len, 3358 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3359 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3360 cqe->ext_bd_len_list[4]); 3361 3362 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3363 fp->err_rx_tpa_invalid_agg_num++; 3364 return; 3365 } 3366 3367 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3368 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3369 mp = sw_rx_data->data; 3370 3371 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 3372 3373 if (mp == NULL) { 3374 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 3375 fp->err_rx_mp_null++; 3376 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3377 3378 return; 3379 } 3380 3381 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3382 3383 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 3384 " flags = %x, dropping incoming packet\n", fp->rss_id, 3385 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 3386 3387 fp->err_rx_hw_errors++; 3388 3389 qlnx_reuse_rx_data(rxq); 3390 3391 QLNX_INC_IERRORS(ifp); 3392 3393 return; 3394 } 3395 3396 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3397 3398 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3399 " dropping incoming packet and reusing its buffer\n", 3400 fp->rss_id); 3401 3402 fp->err_rx_alloc_errors++; 3403 QLNX_INC_IQDROPS(ifp); 3404 3405 /* 3406 * Load the tpa mbuf into the rx ring and save the 3407 * posted mbuf 3408 */ 3409 3410 map = sw_rx_data->map; 3411 addr = sw_rx_data->dma_addr; 3412 3413 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 3414 3415 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 3416 
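/*
 * The spare buffer kept in tpa_info[agg_index].rx_buf takes over this
 * ring slot, and the mbuf that was just received becomes the new
 * spare, so the receive ring stays fully populated even though the
 * aggregation itself is marked as errored.
 */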
sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 3417 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 3418 3419 rxq->tpa_info[agg_index].rx_buf.data = mp; 3420 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 3421 rxq->tpa_info[agg_index].rx_buf.map = map; 3422 3423 rx_bd = (struct eth_rx_bd *) 3424 ecore_chain_produce(&rxq->rx_bd_ring); 3425 3426 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 3427 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 3428 3429 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3430 BUS_DMASYNC_PREREAD); 3431 3432 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 3433 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3434 3435 ecore_chain_consume(&rxq->rx_bd_ring); 3436 3437 /* Now reuse any buffers posted in ext_bd_len_list */ 3438 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3439 3440 if (cqe->ext_bd_len_list[i] == 0) 3441 break; 3442 3443 qlnx_reuse_rx_data(rxq); 3444 } 3445 3446 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3447 return; 3448 } 3449 3450 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3451 3452 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 3453 " dropping incoming packet and reusing its buffer\n", 3454 fp->rss_id); 3455 3456 QLNX_INC_IQDROPS(ifp); 3457 3458 /* if we already have mbuf head in aggregation free it */ 3459 if (rxq->tpa_info[agg_index].mpf) { 3460 m_freem(rxq->tpa_info[agg_index].mpf); 3461 rxq->tpa_info[agg_index].mpl = NULL; 3462 } 3463 rxq->tpa_info[agg_index].mpf = mp; 3464 rxq->tpa_info[agg_index].mpl = NULL; 3465 3466 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3467 ecore_chain_consume(&rxq->rx_bd_ring); 3468 3469 /* Now reuse any buffers posted in ext_bd_len_list */ 3470 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3471 3472 if (cqe->ext_bd_len_list[i] == 0) 3473 break; 3474 3475 qlnx_reuse_rx_data(rxq); 3476 } 3477 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3478 3479 return; 3480 } 3481 3482 /* 3483 * first process the ext_bd_len_list 3484 * if this fails then we simply drop the packet 3485 */ 3486 ecore_chain_consume(&rxq->rx_bd_ring); 3487 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3488 3489 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3490 3491 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 3492 3493 if (cqe->ext_bd_len_list[i] == 0) 3494 break; 3495 3496 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3497 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3498 BUS_DMASYNC_POSTREAD); 3499 3500 mpc = sw_rx_data->data; 3501 3502 if (mpc == NULL) { 3503 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 3504 fp->err_rx_mp_null++; 3505 if (mpf != NULL) 3506 m_freem(mpf); 3507 mpf = mpl = NULL; 3508 rxq->tpa_info[agg_index].agg_state = 3509 QLNX_AGG_STATE_ERROR; 3510 ecore_chain_consume(&rxq->rx_bd_ring); 3511 rxq->sw_rx_cons = 3512 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3513 continue; 3514 } 3515 3516 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3517 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3518 " dropping incoming packet and reusing its" 3519 " buffer\n", fp->rss_id); 3520 3521 qlnx_reuse_rx_data(rxq); 3522 3523 if (mpf != NULL) 3524 m_freem(mpf); 3525 mpf = mpl = NULL; 3526 3527 rxq->tpa_info[agg_index].agg_state = 3528 QLNX_AGG_STATE_ERROR; 3529 3530 ecore_chain_consume(&rxq->rx_bd_ring); 3531 rxq->sw_rx_cons = 3532 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3533 3534 continue; 3535 } 3536 3537 mpc->m_flags &= ~M_PKTHDR; 3538 mpc->m_next = 
NULL; 3539 mpc->m_len = cqe->ext_bd_len_list[i]; 3540 3541 3542 if (mpf == NULL) { 3543 mpf = mpl = mpc; 3544 } else { 3545 mpl->m_len = ha->rx_buf_size; 3546 mpl->m_next = mpc; 3547 mpl = mpc; 3548 } 3549 3550 ecore_chain_consume(&rxq->rx_bd_ring); 3551 rxq->sw_rx_cons = 3552 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3553 } 3554 3555 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3556 3557 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 3558 " incoming packet and reusing its buffer\n", 3559 fp->rss_id); 3560 3561 QLNX_INC_IQDROPS(ifp); 3562 3563 rxq->tpa_info[agg_index].mpf = mp; 3564 rxq->tpa_info[agg_index].mpl = NULL; 3565 3566 return; 3567 } 3568 3569 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 3570 3571 if (mpf != NULL) { 3572 mp->m_len = ha->rx_buf_size; 3573 mp->m_next = mpf; 3574 rxq->tpa_info[agg_index].mpf = mp; 3575 rxq->tpa_info[agg_index].mpl = mpl; 3576 } else { 3577 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 3578 rxq->tpa_info[agg_index].mpf = mp; 3579 rxq->tpa_info[agg_index].mpl = mp; 3580 mp->m_next = NULL; 3581 } 3582 3583 mp->m_flags |= M_PKTHDR; 3584 3585 /* assign packet to this interface interface */ 3586 mp->m_pkthdr.rcvif = ifp; 3587 3588 /* assume no hardware checksum has complated */ 3589 mp->m_pkthdr.csum_flags = 0; 3590 3591 //mp->m_pkthdr.flowid = fp->rss_id; 3592 mp->m_pkthdr.flowid = cqe->rss_hash; 3593 3594 #if __FreeBSD_version >= 1100000 3595 3596 hash_type = cqe->bitfields & 3597 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 3598 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 3599 3600 switch (hash_type) { 3601 3602 case RSS_HASH_TYPE_IPV4: 3603 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 3604 break; 3605 3606 case RSS_HASH_TYPE_TCP_IPV4: 3607 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 3608 break; 3609 3610 case RSS_HASH_TYPE_IPV6: 3611 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 3612 break; 3613 3614 case RSS_HASH_TYPE_TCP_IPV6: 3615 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 3616 break; 3617 3618 default: 3619 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 3620 break; 3621 } 3622 3623 #else 3624 mp->m_flags |= M_FLOWID; 3625 #endif 3626 3627 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 3628 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 3629 3630 mp->m_pkthdr.csum_data = 0xFFFF; 3631 3632 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 3633 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 3634 mp->m_flags |= M_VLANTAG; 3635 } 3636 3637 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 3638 3639 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 3640 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 3641 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 3642 3643 return; 3644 } 3645 3646 static void 3647 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3648 struct qlnx_rx_queue *rxq, 3649 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 3650 { 3651 struct sw_rx_data *sw_rx_data; 3652 int i; 3653 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3654 struct mbuf *mp; 3655 uint32_t agg_index; 3656 device_t dev; 3657 3658 dev = ha->pci_dev; 3659 3660 QL_DPRINT7(ha, "[%d]: enter\n \ 3661 \t type = 0x%x\n \ 3662 \t tpa_agg_index = 0x%x\n \ 3663 \t len_list[0] = 0x%x\n \ 3664 \t len_list[1] = 0x%x\n \ 3665 \t len_list[2] = 0x%x\n \ 3666 \t len_list[3] = 0x%x\n \ 3667 \t len_list[4] = 0x%x\n \ 3668 \t len_list[5] = 0x%x\n", 3669 fp->rss_id, cqe->type, cqe->tpa_agg_index, 3670 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3671 cqe->len_list[3], 
cqe->len_list[4], cqe->len_list[5]); 3672 3673 agg_index = cqe->tpa_agg_index; 3674 3675 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3676 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 3677 fp->err_rx_tpa_invalid_agg_num++; 3678 return; 3679 } 3680 3681 3682 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 3683 3684 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 3685 3686 if (cqe->len_list[i] == 0) 3687 break; 3688 3689 if (rxq->tpa_info[agg_index].agg_state != 3690 QLNX_AGG_STATE_START) { 3691 qlnx_reuse_rx_data(rxq); 3692 continue; 3693 } 3694 3695 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3696 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3697 BUS_DMASYNC_POSTREAD); 3698 3699 mpc = sw_rx_data->data; 3700 3701 if (mpc == NULL) { 3702 3703 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 3704 3705 fp->err_rx_mp_null++; 3706 if (mpf != NULL) 3707 m_freem(mpf); 3708 mpf = mpl = NULL; 3709 rxq->tpa_info[agg_index].agg_state = 3710 QLNX_AGG_STATE_ERROR; 3711 ecore_chain_consume(&rxq->rx_bd_ring); 3712 rxq->sw_rx_cons = 3713 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3714 continue; 3715 } 3716 3717 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3718 3719 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3720 " dropping incoming packet and reusing its" 3721 " buffer\n", fp->rss_id); 3722 3723 qlnx_reuse_rx_data(rxq); 3724 3725 if (mpf != NULL) 3726 m_freem(mpf); 3727 mpf = mpl = NULL; 3728 3729 rxq->tpa_info[agg_index].agg_state = 3730 QLNX_AGG_STATE_ERROR; 3731 3732 ecore_chain_consume(&rxq->rx_bd_ring); 3733 rxq->sw_rx_cons = 3734 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3735 3736 continue; 3737 } 3738 3739 mpc->m_flags &= ~M_PKTHDR; 3740 mpc->m_next = NULL; 3741 mpc->m_len = cqe->len_list[i]; 3742 3743 3744 if (mpf == NULL) { 3745 mpf = mpl = mpc; 3746 } else { 3747 mpl->m_len = ha->rx_buf_size; 3748 mpl->m_next = mpc; 3749 mpl = mpc; 3750 } 3751 3752 ecore_chain_consume(&rxq->rx_bd_ring); 3753 rxq->sw_rx_cons = 3754 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3755 } 3756 3757 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 3758 fp->rss_id, mpf, mpl); 3759 3760 if (mpf != NULL) { 3761 mp = rxq->tpa_info[agg_index].mpl; 3762 mp->m_len = ha->rx_buf_size; 3763 mp->m_next = mpf; 3764 rxq->tpa_info[agg_index].mpl = mpl; 3765 } 3766 3767 return; 3768 } 3769 3770 static int 3771 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3772 struct qlnx_rx_queue *rxq, 3773 struct eth_fast_path_rx_tpa_end_cqe *cqe) 3774 { 3775 struct sw_rx_data *sw_rx_data; 3776 int i; 3777 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3778 struct mbuf *mp; 3779 uint32_t agg_index; 3780 uint32_t len = 0; 3781 struct ifnet *ifp = ha->ifp; 3782 device_t dev; 3783 3784 dev = ha->pci_dev; 3785 3786 QL_DPRINT7(ha, "[%d]: enter\n \ 3787 \t type = 0x%x\n \ 3788 \t tpa_agg_index = 0x%x\n \ 3789 \t total_packet_len = 0x%x\n \ 3790 \t num_of_bds = 0x%x\n \ 3791 \t end_reason = 0x%x\n \ 3792 \t num_of_coalesced_segs = 0x%x\n \ 3793 \t ts_delta = 0x%x\n \ 3794 \t len_list[0] = 0x%x\n \ 3795 \t len_list[1] = 0x%x\n \ 3796 \t len_list[2] = 0x%x\n \ 3797 \t len_list[3] = 0x%x\n", 3798 fp->rss_id, cqe->type, cqe->tpa_agg_index, 3799 cqe->total_packet_len, cqe->num_of_bds, 3800 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 3801 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 3802 cqe->len_list[3]); 3803 3804 agg_index = cqe->tpa_agg_index; 3805 3806 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3807 3808 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 3809 3810 fp->err_rx_tpa_invalid_agg_num++; 3811 return (0); 3812 
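/*
 * An out-of-range aggregation index means there is no per-aggregation
 * state to unwind; the CQE is only counted, and zero coalesced
 * segments are reported back to the caller.
 */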
} 3813 3814 3815 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 3816 3817 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 3818 3819 if (cqe->len_list[i] == 0) 3820 break; 3821 3822 if (rxq->tpa_info[agg_index].agg_state != 3823 QLNX_AGG_STATE_START) { 3824 3825 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 3826 3827 qlnx_reuse_rx_data(rxq); 3828 continue; 3829 } 3830 3831 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3832 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3833 BUS_DMASYNC_POSTREAD); 3834 3835 mpc = sw_rx_data->data; 3836 3837 if (mpc == NULL) { 3838 3839 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 3840 3841 fp->err_rx_mp_null++; 3842 if (mpf != NULL) 3843 m_freem(mpf); 3844 mpf = mpl = NULL; 3845 rxq->tpa_info[agg_index].agg_state = 3846 QLNX_AGG_STATE_ERROR; 3847 ecore_chain_consume(&rxq->rx_bd_ring); 3848 rxq->sw_rx_cons = 3849 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3850 continue; 3851 } 3852 3853 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3854 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3855 " dropping incoming packet and reusing its" 3856 " buffer\n", fp->rss_id); 3857 3858 qlnx_reuse_rx_data(rxq); 3859 3860 if (mpf != NULL) 3861 m_freem(mpf); 3862 mpf = mpl = NULL; 3863 3864 rxq->tpa_info[agg_index].agg_state = 3865 QLNX_AGG_STATE_ERROR; 3866 3867 ecore_chain_consume(&rxq->rx_bd_ring); 3868 rxq->sw_rx_cons = 3869 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3870 3871 continue; 3872 } 3873 3874 mpc->m_flags &= ~M_PKTHDR; 3875 mpc->m_next = NULL; 3876 mpc->m_len = cqe->len_list[i]; 3877 3878 3879 if (mpf == NULL) { 3880 mpf = mpl = mpc; 3881 } else { 3882 mpl->m_len = ha->rx_buf_size; 3883 mpl->m_next = mpc; 3884 mpl = mpc; 3885 } 3886 3887 ecore_chain_consume(&rxq->rx_bd_ring); 3888 rxq->sw_rx_cons = 3889 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3890 } 3891 3892 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 3893 3894 if (mpf != NULL) { 3895 3896 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 3897 3898 mp = rxq->tpa_info[agg_index].mpl; 3899 mp->m_len = ha->rx_buf_size; 3900 mp->m_next = mpf; 3901 } 3902 3903 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 3904 3905 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 3906 3907 if (rxq->tpa_info[agg_index].mpf != NULL) 3908 m_freem(rxq->tpa_info[agg_index].mpf); 3909 rxq->tpa_info[agg_index].mpf = NULL; 3910 rxq->tpa_info[agg_index].mpl = NULL; 3911 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3912 return (0); 3913 } 3914 3915 mp = rxq->tpa_info[agg_index].mpf; 3916 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 3917 mp->m_pkthdr.len = cqe->total_packet_len; 3918 3919 if (mp->m_next == NULL) 3920 mp->m_len = mp->m_pkthdr.len; 3921 else { 3922 /* compute the total packet length */ 3923 mpf = mp; 3924 while (mpf != NULL) { 3925 len += mpf->m_len; 3926 mpf = mpf->m_next; 3927 } 3928 3929 if (cqe->total_packet_len > len) { 3930 mpl = rxq->tpa_info[agg_index].mpl; 3931 mpl->m_len += (cqe->total_packet_len - len); 3932 } 3933 } 3934 3935 QLNX_INC_IPACKETS(ifp); 3936 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 3937 3938 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \ 3939 m_len = 0x%x m_pkthdr_len = 0x%x\n", 3940 fp->rss_id, mp->m_pkthdr.csum_data, 3941 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 3942 3943 (*ifp->if_input)(ifp, mp); 3944 3945 rxq->tpa_info[agg_index].mpf = NULL; 3946 rxq->tpa_info[agg_index].mpl = NULL; 3947 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 3948 3949 return (cqe->num_of_coalesced_segs); 3950 } 3951 3952 static int 3953 
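/*
 * Fastpath receive completion processing: consume CQEs until the
 * software consumer catches up with the hardware consumer or "budget"
 * packets have been handed to the stack, dispatching regular,
 * slow-path and TPA (start/cont/end) CQEs as they are found, and then
 * republish the ring producers to the NIC.
 */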
qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 3954 int lro_enable) 3955 { 3956 uint16_t hw_comp_cons, sw_comp_cons; 3957 int rx_pkt = 0; 3958 struct qlnx_rx_queue *rxq = fp->rxq; 3959 struct ifnet *ifp = ha->ifp; 3960 struct ecore_dev *cdev = &ha->cdev; 3961 struct ecore_hwfn *p_hwfn; 3962 3963 #ifdef QLNX_SOFT_LRO 3964 struct lro_ctrl *lro; 3965 3966 lro = &rxq->lro; 3967 #endif /* #ifdef QLNX_SOFT_LRO */ 3968 3969 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 3970 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 3971 3972 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 3973 3974 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 3975 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 3976 * read before it is written by FW, then FW writes CQE and SB, and then 3977 * the CPU reads the hw_comp_cons, it will use an old CQE. 3978 */ 3979 3980 /* Loop to complete all indicated BDs */ 3981 while (sw_comp_cons != hw_comp_cons) { 3982 union eth_rx_cqe *cqe; 3983 struct eth_fast_path_rx_reg_cqe *fp_cqe; 3984 struct sw_rx_data *sw_rx_data; 3985 register struct mbuf *mp; 3986 enum eth_rx_cqe_type cqe_type; 3987 uint16_t len, pad, len_on_first_bd; 3988 uint8_t *data; 3989 #if __FreeBSD_version >= 1100000 3990 uint8_t hash_type; 3991 #endif /* #if __FreeBSD_version >= 1100000 */ 3992 3993 /* Get the CQE from the completion ring */ 3994 cqe = (union eth_rx_cqe *) 3995 ecore_chain_consume(&rxq->rx_comp_ring); 3996 cqe_type = cqe->fast_path_regular.type; 3997 3998 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 3999 QL_DPRINT3(ha, "Got a slowath CQE\n"); 4000 4001 ecore_eth_cqe_completion(p_hwfn, 4002 (struct eth_slow_path_rx_cqe *)cqe); 4003 goto next_cqe; 4004 } 4005 4006 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4007 4008 switch (cqe_type) { 4009 4010 case ETH_RX_CQE_TYPE_TPA_START: 4011 qlnx_tpa_start(ha, fp, rxq, 4012 &cqe->fast_path_tpa_start); 4013 fp->tpa_start++; 4014 break; 4015 4016 case ETH_RX_CQE_TYPE_TPA_CONT: 4017 qlnx_tpa_cont(ha, fp, rxq, 4018 &cqe->fast_path_tpa_cont); 4019 fp->tpa_cont++; 4020 break; 4021 4022 case ETH_RX_CQE_TYPE_TPA_END: 4023 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4024 &cqe->fast_path_tpa_end); 4025 fp->tpa_end++; 4026 break; 4027 4028 default: 4029 break; 4030 } 4031 4032 goto next_cqe; 4033 } 4034 4035 /* Get the data from the SW ring */ 4036 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4037 mp = sw_rx_data->data; 4038 4039 if (mp == NULL) { 4040 QL_DPRINT1(ha, "mp = NULL\n"); 4041 fp->err_rx_mp_null++; 4042 rxq->sw_rx_cons = 4043 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4044 goto next_cqe; 4045 } 4046 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4047 BUS_DMASYNC_POSTREAD); 4048 4049 /* non GRO */ 4050 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4051 len = le16toh(fp_cqe->pkt_len); 4052 pad = fp_cqe->placement_offset; 4053 4054 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4055 " len %u, parsing flags = %d pad = %d\n", 4056 cqe_type, fp_cqe->bitfields, 4057 le16toh(fp_cqe->vlan_tag), 4058 len, le16toh(fp_cqe->pars_flags.flags), pad); 4059 4060 data = mtod(mp, uint8_t *); 4061 data = data + pad; 4062 4063 if (0) 4064 qlnx_dump_buf8(ha, __func__, data, len); 4065 4066 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4067 * is always with a fixed size. If allocation fails, we take the 4068 * consumed BD and return it to the ring in the PROD position. 
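 * (qlnx_reuse_rx_data() performs exactly that re-post.)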
4069 * The packet that was received on that BD will be dropped (and 4070 * not passed to the upper stack). 4071 */ 4072 /* If this is an error packet then drop it */ 4073 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4074 CQE_FLAGS_ERR) { 4075 4076 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4077 " dropping incoming packet\n", sw_comp_cons, 4078 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4079 fp->err_rx_hw_errors++; 4080 4081 qlnx_reuse_rx_data(rxq); 4082 4083 QLNX_INC_IERRORS(ifp); 4084 4085 goto next_cqe; 4086 } 4087 4088 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4089 4090 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4091 " incoming packet and reusing its buffer\n"); 4092 qlnx_reuse_rx_data(rxq); 4093 4094 fp->err_rx_alloc_errors++; 4095 4096 QLNX_INC_IQDROPS(ifp); 4097 4098 goto next_cqe; 4099 } 4100 4101 ecore_chain_consume(&rxq->rx_bd_ring); 4102 4103 len_on_first_bd = fp_cqe->len_on_first_bd; 4104 m_adj(mp, pad); 4105 mp->m_pkthdr.len = len; 4106 4107 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n", 4108 len, len_on_first_bd); 4109 if ((len > 60 ) && (len > len_on_first_bd)) { 4110 4111 mp->m_len = len_on_first_bd; 4112 4113 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4114 (len - len_on_first_bd)) != 0) { 4115 4116 m_freem(mp); 4117 4118 QLNX_INC_IQDROPS(ifp); 4119 4120 goto next_cqe; 4121 } 4122 4123 } else if (len_on_first_bd < len) { 4124 fp->err_rx_jumbo_chain_pkts++; 4125 } else { 4126 mp->m_len = len; 4127 } 4128 4129 mp->m_flags |= M_PKTHDR; 4130 4131 /* assign packet to this interface interface */ 4132 mp->m_pkthdr.rcvif = ifp; 4133 4134 /* assume no hardware checksum has complated */ 4135 mp->m_pkthdr.csum_flags = 0; 4136 4137 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4138 4139 #if __FreeBSD_version >= 1100000 4140 4141 hash_type = fp_cqe->bitfields & 4142 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4143 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4144 4145 switch (hash_type) { 4146 4147 case RSS_HASH_TYPE_IPV4: 4148 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4149 break; 4150 4151 case RSS_HASH_TYPE_TCP_IPV4: 4152 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4153 break; 4154 4155 case RSS_HASH_TYPE_IPV6: 4156 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4157 break; 4158 4159 case RSS_HASH_TYPE_TCP_IPV6: 4160 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4161 break; 4162 4163 default: 4164 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4165 break; 4166 } 4167 4168 #else 4169 mp->m_flags |= M_FLOWID; 4170 #endif 4171 4172 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4173 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4174 } 4175 4176 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4177 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4178 } 4179 4180 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4181 mp->m_pkthdr.csum_data = 0xFFFF; 4182 mp->m_pkthdr.csum_flags |= 4183 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4184 } 4185 4186 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4187 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4188 mp->m_flags |= M_VLANTAG; 4189 } 4190 4191 QLNX_INC_IPACKETS(ifp); 4192 QLNX_INC_IBYTES(ifp, len); 4193 4194 #ifdef QLNX_SOFT_LRO 4195 4196 if (lro_enable) { 4197 4198 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4199 4200 tcp_lro_queue_mbuf(lro, mp); 4201 4202 #else 4203 4204 if (tcp_lro_rx(lro, mp, 0)) 4205 (*ifp->if_input)(ifp, mp); 4206 4207 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4208 4209 } else { 4210 (*ifp->if_input)(ifp, mp); 4211 } 4212 #else 4213 4214 
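/* Soft LRO is not compiled in: hand the frame straight to the stack. */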
(*ifp->if_input)(ifp, mp); 4215 4216 #endif /* #ifdef QLNX_SOFT_LRO */ 4217 4218 rx_pkt++; 4219 4220 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4221 4222 next_cqe: /* don't consume bd rx buffer */ 4223 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4224 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4225 4226 /* CR TPA - revisit how to handle budget in TPA perhaps 4227 increase on "end" */ 4228 if (rx_pkt == budget) 4229 break; 4230 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4231 4232 /* Update producers */ 4233 qlnx_update_rx_prod(p_hwfn, rxq); 4234 4235 return rx_pkt; 4236 } 4237 4238 /* 4239 * fast path interrupt 4240 */ 4241 4242 static void 4243 qlnx_fp_isr(void *arg) 4244 { 4245 qlnx_ivec_t *ivec = arg; 4246 qlnx_host_t *ha; 4247 struct qlnx_fastpath *fp = NULL; 4248 int idx; 4249 4250 ha = ivec->ha; 4251 4252 if (ha->state != QLNX_STATE_OPEN) { 4253 return; 4254 } 4255 4256 idx = ivec->rss_idx; 4257 4258 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4259 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4260 ha->err_illegal_intr++; 4261 return; 4262 } 4263 fp = &ha->fp_array[idx]; 4264 4265 if (fp == NULL) { 4266 ha->err_fp_null++; 4267 } else { 4268 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4269 if (fp->fp_taskqueue != NULL) 4270 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 4271 } 4272 4273 return; 4274 } 4275 4276 4277 /* 4278 * slow path interrupt processing function 4279 * can be invoked in polled mode or in interrupt mode via taskqueue. 4280 */ 4281 void 4282 qlnx_sp_isr(void *arg) 4283 { 4284 struct ecore_hwfn *p_hwfn; 4285 qlnx_host_t *ha; 4286 4287 p_hwfn = arg; 4288 4289 ha = (qlnx_host_t *)p_hwfn->p_dev; 4290 4291 ha->sp_interrupts++; 4292 4293 QL_DPRINT2(ha, "enter\n"); 4294 4295 ecore_int_sp_dpc(p_hwfn); 4296 4297 QL_DPRINT2(ha, "exit\n"); 4298 4299 return; 4300 } 4301 4302 /***************************************************************************** 4303 * Support Functions for DMA'able Memory 4304 *****************************************************************************/ 4305 4306 static void 4307 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4308 { 4309 *((bus_addr_t *)arg) = 0; 4310 4311 if (error) { 4312 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4313 return; 4314 } 4315 4316 *((bus_addr_t *)arg) = segs[0].ds_addr; 4317 4318 return; 4319 } 4320 4321 static int 4322 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4323 { 4324 int ret = 0; 4325 device_t dev; 4326 bus_addr_t b_addr; 4327 4328 dev = ha->pci_dev; 4329 4330 ret = bus_dma_tag_create( 4331 ha->parent_tag,/* parent */ 4332 dma_buf->alignment, 4333 ((bus_size_t)(1ULL << 32)),/* boundary */ 4334 BUS_SPACE_MAXADDR, /* lowaddr */ 4335 BUS_SPACE_MAXADDR, /* highaddr */ 4336 NULL, NULL, /* filter, filterarg */ 4337 dma_buf->size, /* maxsize */ 4338 1, /* nsegments */ 4339 dma_buf->size, /* maxsegsize */ 4340 0, /* flags */ 4341 NULL, NULL, /* lockfunc, lockarg */ 4342 &dma_buf->dma_tag); 4343 4344 if (ret) { 4345 QL_DPRINT1(ha, "could not create dma tag\n"); 4346 goto qlnx_alloc_dmabuf_exit; 4347 } 4348 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4349 (void **)&dma_buf->dma_b, 4350 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4351 &dma_buf->dma_map); 4352 if (ret) { 4353 bus_dma_tag_destroy(dma_buf->dma_tag); 4354 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 4355 goto qlnx_alloc_dmabuf_exit; 4356 } 4357 4358 ret = bus_dmamap_load(dma_buf->dma_tag, 4359 dma_buf->dma_map, 4360 dma_buf->dma_b, 4361 
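/* kernel virtual address to map; the single segment's bus address
   comes back through qlnx_dmamap_callback() into b_addr */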
dma_buf->size, 4362 qlnx_dmamap_callback, 4363 &b_addr, BUS_DMA_NOWAIT); 4364 4365 if (ret || !b_addr) { 4366 bus_dma_tag_destroy(dma_buf->dma_tag); 4367 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4368 dma_buf->dma_map); 4369 ret = -1; 4370 goto qlnx_alloc_dmabuf_exit; 4371 } 4372 4373 dma_buf->dma_addr = b_addr; 4374 4375 qlnx_alloc_dmabuf_exit: 4376 4377 return ret; 4378 } 4379 4380 static void 4381 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4382 { 4383 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 4384 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 4385 bus_dma_tag_destroy(dma_buf->dma_tag); 4386 return; 4387 } 4388 4389 void * 4390 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 4391 { 4392 qlnx_dma_t dma_buf; 4393 qlnx_dma_t *dma_p; 4394 qlnx_host_t *ha; 4395 device_t dev; 4396 4397 ha = (qlnx_host_t *)ecore_dev; 4398 dev = ha->pci_dev; 4399 4400 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4401 4402 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 4403 4404 dma_buf.size = size + PAGE_SIZE; 4405 dma_buf.alignment = 8; 4406 4407 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 4408 return (NULL); 4409 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 4410 4411 *phys = dma_buf.dma_addr; 4412 4413 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 4414 4415 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 4416 /* 4417 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 4418 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 4419 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 4420 */ 4421 return (dma_buf.dma_b); 4422 } 4423 4424 void 4425 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 4426 uint32_t size) 4427 { 4428 qlnx_dma_t dma_buf, *dma_p; 4429 qlnx_host_t *ha; 4430 device_t dev; 4431 4432 ha = (qlnx_host_t *)ecore_dev; 4433 dev = ha->pci_dev; 4434 4435 if (v_addr == NULL) 4436 return; 4437 4438 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4439 4440 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 4441 /* 4442 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 4443 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 4444 dma_p->dma_b, (void *)dma_p->dma_addr, size); 4445 */ 4446 dma_buf = *dma_p; 4447 4448 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 4449 return; 4450 } 4451 4452 static int 4453 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 4454 { 4455 int ret; 4456 device_t dev; 4457 4458 dev = ha->pci_dev; 4459 4460 /* 4461 * Allocate parent DMA Tag 4462 */ 4463 ret = bus_dma_tag_create( 4464 bus_get_dma_tag(dev), /* parent */ 4465 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 4466 BUS_SPACE_MAXADDR, /* lowaddr */ 4467 BUS_SPACE_MAXADDR, /* highaddr */ 4468 NULL, NULL, /* filter, filterarg */ 4469 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 4470 0, /* nsegments */ 4471 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 4472 0, /* flags */ 4473 NULL, NULL, /* lockfunc, lockarg */ 4474 &ha->parent_tag); 4475 4476 if (ret) { 4477 QL_DPRINT1(ha, "could not create parent dma tag\n"); 4478 return (-1); 4479 } 4480 4481 ha->flags.parent_tag = 1; 4482 4483 return (0); 4484 } 4485 4486 static void 4487 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 4488 { 4489 if (ha->parent_tag != NULL) { 4490 bus_dma_tag_destroy(ha->parent_tag); 4491 ha->parent_tag = NULL; 4492 } 4493 return; 4494 } 4495 4496 static int 4497 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 4498 { 4499 if (bus_dma_tag_create(NULL, /* parent */ 4500 1, 0, /* alignment, bounds */ 4501 BUS_SPACE_MAXADDR, /* lowaddr */ 4502 BUS_SPACE_MAXADDR, /* 
highaddr */ 4503 NULL, NULL, /* filter, filterarg */ 4504 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 4505 QLNX_MAX_SEGMENTS, /* nsegments */ 4506 (PAGE_SIZE * 4), /* maxsegsize */ 4507 BUS_DMA_ALLOCNOW, /* flags */ 4508 NULL, /* lockfunc */ 4509 NULL, /* lockfuncarg */ 4510 &ha->tx_tag)) { 4511 4512 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 4513 return (-1); 4514 } 4515 4516 return (0); 4517 } 4518 4519 static void 4520 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 4521 { 4522 if (ha->tx_tag != NULL) { 4523 bus_dma_tag_destroy(ha->tx_tag); 4524 ha->tx_tag = NULL; 4525 } 4526 return; 4527 } 4528 4529 static int 4530 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 4531 { 4532 if (bus_dma_tag_create(NULL, /* parent */ 4533 1, 0, /* alignment, bounds */ 4534 BUS_SPACE_MAXADDR, /* lowaddr */ 4535 BUS_SPACE_MAXADDR, /* highaddr */ 4536 NULL, NULL, /* filter, filterarg */ 4537 MJUM9BYTES, /* maxsize */ 4538 1, /* nsegments */ 4539 MJUM9BYTES, /* maxsegsize */ 4540 BUS_DMA_ALLOCNOW, /* flags */ 4541 NULL, /* lockfunc */ 4542 NULL, /* lockfuncarg */ 4543 &ha->rx_tag)) { 4544 4545 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 4546 4547 return (-1); 4548 } 4549 return (0); 4550 } 4551 4552 static void 4553 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 4554 { 4555 if (ha->rx_tag != NULL) { 4556 bus_dma_tag_destroy(ha->rx_tag); 4557 ha->rx_tag = NULL; 4558 } 4559 return; 4560 } 4561 4562 /********************************* 4563 * Exported functions 4564 *********************************/ 4565 uint32_t 4566 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 4567 { 4568 uint32_t bar_size; 4569 4570 bar_id = bar_id * 2; 4571 4572 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 4573 SYS_RES_MEMORY, 4574 PCIR_BAR(bar_id)); 4575 4576 return (bar_size); 4577 } 4578 4579 uint32_t 4580 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 4581 { 4582 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4583 pci_reg, 1); 4584 return 0; 4585 } 4586 4587 uint32_t 4588 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 4589 uint16_t *reg_value) 4590 { 4591 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4592 pci_reg, 2); 4593 return 0; 4594 } 4595 4596 uint32_t 4597 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 4598 uint32_t *reg_value) 4599 { 4600 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4601 pci_reg, 4); 4602 return 0; 4603 } 4604 4605 void 4606 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 4607 { 4608 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4609 pci_reg, reg_value, 1); 4610 return; 4611 } 4612 4613 void 4614 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 4615 uint16_t reg_value) 4616 { 4617 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4618 pci_reg, reg_value, 2); 4619 return; 4620 } 4621 4622 void 4623 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 4624 uint32_t reg_value) 4625 { 4626 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 4627 pci_reg, reg_value, 4); 4628 return; 4629 } 4630 4631 4632 int 4633 qlnx_pci_find_capability(void *ecore_dev, int cap) 4634 { 4635 int reg; 4636 qlnx_host_t *ha; 4637 4638 ha = ecore_dev; 4639 4640 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0) 4641 return reg; 4642 else { 4643 QL_DPRINT1(ha, "failed\n"); 4644 return 0; 4645 } 4646 } 4647 4648 uint32_t 4649 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 4650 { 4651 uint32_t data32; 4652 struct ecore_dev *cdev; 4653 struct 
ecore_hwfn *p_hwfn; 4654 4655 p_hwfn = hwfn; 4656 4657 cdev = p_hwfn->p_dev; 4658 4659 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 4660 (uint8_t *)(cdev->regview)) + reg_addr; 4661 4662 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr); 4663 4664 return (data32); 4665 } 4666 4667 void 4668 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 4669 { 4670 struct ecore_dev *cdev; 4671 struct ecore_hwfn *p_hwfn; 4672 4673 p_hwfn = hwfn; 4674 4675 cdev = p_hwfn->p_dev; 4676 4677 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 4678 (uint8_t *)(cdev->regview)) + reg_addr; 4679 4680 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value); 4681 4682 return; 4683 } 4684 4685 void 4686 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 4687 { 4688 struct ecore_dev *cdev; 4689 struct ecore_hwfn *p_hwfn; 4690 4691 p_hwfn = hwfn; 4692 4693 cdev = p_hwfn->p_dev; 4694 4695 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 4696 (uint8_t *)(cdev->regview)) + reg_addr; 4697 4698 bus_write_2(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value); 4699 4700 return; 4701 } 4702 4703 void 4704 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 4705 { 4706 struct ecore_dev *cdev; 4707 struct ecore_hwfn *p_hwfn; 4708 4709 p_hwfn = hwfn; 4710 4711 cdev = p_hwfn->p_dev; 4712 4713 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) - 4714 (uint8_t *)(cdev->doorbells)) + reg_addr; 4715 4716 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value); 4717 4718 return; 4719 } 4720 4721 uint32_t 4722 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 4723 { 4724 uint32_t data32; 4725 uint32_t offset; 4726 struct ecore_dev *cdev; 4727 4728 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4729 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4730 4731 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 4732 4733 return (data32); 4734 } 4735 4736 void 4737 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 4738 { 4739 uint32_t offset; 4740 struct ecore_dev *cdev; 4741 4742 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 4743 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 4744 4745 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 4746 4747 return; 4748 } 4749 4750 void * 4751 qlnx_zalloc(uint32_t size) 4752 { 4753 caddr_t va; 4754 4755 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 4756 bzero(va, size); 4757 return ((void *)va); 4758 } 4759 4760 void 4761 qlnx_barrier(void *p_hwfn) 4762 { 4763 qlnx_host_t *ha; 4764 4765 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4766 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 4767 } 4768 4769 void 4770 qlnx_link_update(void *p_hwfn) 4771 { 4772 qlnx_host_t *ha; 4773 int prev_link_state; 4774 4775 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 4776 4777 qlnx_fill_link(p_hwfn, &ha->if_link); 4778 4779 prev_link_state = ha->link_up; 4780 ha->link_up = ha->if_link.link_up; 4781 4782 if (prev_link_state != ha->link_up) { 4783 if (ha->link_up) { 4784 if_link_state_change(ha->ifp, LINK_STATE_UP); 4785 } else { 4786 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 4787 } 4788 } 4789 return; 4790 } 4791 4792 void 4793 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link) 4794 { 4795 struct ecore_mcp_link_params link_params; 4796 struct ecore_mcp_link_state link_state; 4797 4798 memset(if_link, 0, sizeof(*if_link)); 4799 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 4800 
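/* Take local copies of the MFW link parameters and state (filled in just
 * below); they are translated into the QLNX_LINK_CAP_* flags stored in the
 * caller-supplied if_link (see qlnx_link_update() above). */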
memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 4801 4802 /* Prepare source inputs */ 4803 /* we only deal with physical functions */ 4804 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 4805 sizeof(link_params)); 4806 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 4807 sizeof(link_state)); 4808 4809 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type); 4810 4811 /* Set the link parameters to pass to protocol driver */ 4812 if (link_state.link_up) { 4813 if_link->link_up = true; 4814 if_link->speed = link_state.speed; 4815 } 4816 4817 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 4818 4819 if (link_params.speed.autoneg) 4820 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 4821 4822 if (link_params.pause.autoneg || 4823 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 4824 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 4825 4826 if (link_params.pause.autoneg || link_params.pause.forced_rx || 4827 link_params.pause.forced_tx) 4828 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 4829 4830 if (link_params.speed.advertised_speeds & 4831 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 4832 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 4833 QLNX_LINK_CAP_1000baseT_Full; 4834 4835 if (link_params.speed.advertised_speeds & 4836 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 4837 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 4838 4839 if (link_params.speed.advertised_speeds & 4840 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 4841 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 4842 4843 if (link_params.speed.advertised_speeds & 4844 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 4845 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 4846 4847 if (link_params.speed.advertised_speeds & 4848 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 4849 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 4850 4851 if (link_params.speed.advertised_speeds & 4852 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 4853 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 4854 4855 if_link->advertised_caps = if_link->supported_caps; 4856 4857 if_link->autoneg = link_params.speed.autoneg; 4858 if_link->duplex = QLNX_LINK_DUPLEX; 4859 4860 /* Link partner capabilities */ 4861 4862 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 4863 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 4864 4865 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 4866 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 4867 4868 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 4869 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 4870 4871 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 4872 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 4873 4874 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 4875 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 4876 4877 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 4878 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 4879 4880 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 4881 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 4882 4883 if (link_state.an_complete) 4884 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 4885 4886 if (link_state.partner_adv_pause) 4887 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 4888 4889 if ((link_state.partner_adv_pause == 4890 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) 
|| 4891 (link_state.partner_adv_pause == 4892 ECORE_LINK_PARTNER_BOTH_PAUSE)) 4893 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 4894 4895 return; 4896 } 4897 4898 static int 4899 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 4900 { 4901 int rc, i; 4902 4903 for (i = 0; i < cdev->num_hwfns; i++) { 4904 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 4905 p_hwfn->pf_params = *func_params; 4906 } 4907 4908 rc = ecore_resc_alloc(cdev); 4909 if (rc) 4910 goto qlnx_nic_setup_exit; 4911 4912 ecore_resc_setup(cdev); 4913 4914 qlnx_nic_setup_exit: 4915 4916 return rc; 4917 } 4918 4919 static int 4920 qlnx_nic_start(struct ecore_dev *cdev) 4921 { 4922 int rc; 4923 struct ecore_hw_init_params params; 4924 4925 bzero(&params, sizeof (struct ecore_hw_init_params)); 4926 4927 params.p_tunn = NULL; 4928 params.b_hw_start = true; 4929 params.int_mode = cdev->int_mode; 4930 params.allow_npar_tx_switch = true; 4931 params.bin_fw_data = NULL; 4932 4933 rc = ecore_hw_init(cdev, &params); 4934 if (rc) { 4935 ecore_resc_free(cdev); 4936 return rc; 4937 } 4938 4939 return 0; 4940 } 4941 4942 static int 4943 qlnx_slowpath_start(qlnx_host_t *ha) 4944 { 4945 struct ecore_dev *cdev; 4946 struct ecore_pf_params pf_params; 4947 int rc; 4948 4949 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 4950 pf_params.eth_pf_params.num_cons = 4951 (ha->num_rss) * (ha->num_tc + 1); 4952 4953 cdev = &ha->cdev; 4954 4955 rc = qlnx_nic_setup(cdev, &pf_params); 4956 if (rc) 4957 goto qlnx_slowpath_start_exit; 4958 4959 cdev->int_mode = ECORE_INT_MODE_MSIX; 4960 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 4961 4962 #ifdef QLNX_MAX_COALESCE 4963 cdev->rx_coalesce_usecs = 255; 4964 cdev->tx_coalesce_usecs = 255; 4965 #endif 4966 4967 rc = qlnx_nic_start(cdev); 4968 4969 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 4970 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 4971 4972 qlnx_slowpath_start_exit: 4973 4974 return (rc); 4975 } 4976 4977 static int 4978 qlnx_slowpath_stop(qlnx_host_t *ha) 4979 { 4980 struct ecore_dev *cdev; 4981 device_t dev = ha->pci_dev; 4982 int i; 4983 4984 cdev = &ha->cdev; 4985 4986 ecore_hw_stop(cdev); 4987 4988 for (i = 0; i < ha->cdev.num_hwfns; i++) { 4989 4990 if (ha->sp_handle[i]) 4991 (void)bus_teardown_intr(dev, ha->sp_irq[i], 4992 ha->sp_handle[i]); 4993 4994 ha->sp_handle[i] = NULL; 4995 4996 if (ha->sp_irq[i]) 4997 (void) bus_release_resource(dev, SYS_RES_IRQ, 4998 ha->sp_irq_rid[i], ha->sp_irq[i]); 4999 ha->sp_irq[i] = NULL; 5000 } 5001 5002 ecore_resc_free(cdev); 5003 5004 return 0; 5005 } 5006 5007 static void 5008 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5009 char ver_str[VER_SIZE]) 5010 { 5011 int i; 5012 5013 memcpy(cdev->name, name, NAME_SIZE); 5014 5015 for_each_hwfn(cdev, i) { 5016 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5017 } 5018 5019 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5020 5021 return ; 5022 } 5023 5024 void 5025 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5026 { 5027 enum ecore_mcp_protocol_type type; 5028 union ecore_mcp_protocol_stats *stats; 5029 struct ecore_eth_stats eth_stats; 5030 qlnx_host_t *ha; 5031 5032 ha = cdev; 5033 stats = proto_stats; 5034 type = proto_type; 5035 5036 switch (type) { 5037 5038 case ECORE_MCP_LAN_STATS: 5039 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); 5040 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5041 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5042 stats->lan_stats.fcs_err = 
-1; 5043 break; 5044 5045 default: 5046 ha->err_get_proto_invalid_type++; 5047 5048 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5049 break; 5050 } 5051 return; 5052 } 5053 5054 static int 5055 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5056 { 5057 struct ecore_hwfn *p_hwfn; 5058 struct ecore_ptt *p_ptt; 5059 5060 p_hwfn = &ha->cdev.hwfns[0]; 5061 p_ptt = ecore_ptt_acquire(p_hwfn); 5062 5063 if (p_ptt == NULL) { 5064 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5065 return (-1); 5066 } 5067 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5068 5069 ecore_ptt_release(p_hwfn, p_ptt); 5070 5071 return (0); 5072 } 5073 5074 static int 5075 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5076 { 5077 struct ecore_hwfn *p_hwfn; 5078 struct ecore_ptt *p_ptt; 5079 5080 p_hwfn = &ha->cdev.hwfns[0]; 5081 p_ptt = ecore_ptt_acquire(p_hwfn); 5082 5083 if (p_ptt == NULL) { 5084 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5085 return (-1); 5086 } 5087 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5088 5089 ecore_ptt_release(p_hwfn, p_ptt); 5090 5091 return (0); 5092 } 5093 5094 static int 5095 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5096 { 5097 struct ecore_dev *cdev; 5098 5099 cdev = &ha->cdev; 5100 5101 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5102 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5103 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5104 5105 return 0; 5106 } 5107 5108 static void 5109 qlnx_init_fp(qlnx_host_t *ha) 5110 { 5111 int rss_id, txq_array_index, tc; 5112 5113 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5114 5115 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5116 5117 fp->rss_id = rss_id; 5118 fp->edev = ha; 5119 fp->sb_info = &ha->sb_array[rss_id]; 5120 fp->rxq = &ha->rxq_array[rss_id]; 5121 fp->rxq->rxq_id = rss_id; 5122 5123 for (tc = 0; tc < ha->num_tc; tc++) { 5124 txq_array_index = tc * ha->num_rss + rss_id; 5125 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5126 fp->txq[tc]->index = txq_array_index; 5127 } 5128 5129 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5130 rss_id); 5131 5132 fp->tx_ring_full = 0; 5133 5134 /* reset all the statistics counters */ 5135 5136 fp->tx_pkts_processed = 0; 5137 fp->tx_pkts_freed = 0; 5138 fp->tx_pkts_transmitted = 0; 5139 fp->tx_pkts_completed = 0; 5140 fp->tx_lso_wnd_min_len = 0; 5141 fp->tx_defrag = 0; 5142 fp->tx_nsegs_gt_elem_left = 0; 5143 fp->tx_tso_max_nsegs = 0; 5144 fp->tx_tso_min_nsegs = 0; 5145 fp->err_tx_nsegs_gt_elem_left = 0; 5146 fp->err_tx_dmamap_create = 0; 5147 fp->err_tx_defrag_dmamap_load = 0; 5148 fp->err_tx_non_tso_max_seg = 0; 5149 fp->err_tx_dmamap_load = 0; 5150 fp->err_tx_defrag = 0; 5151 fp->err_tx_free_pkt_null = 0; 5152 fp->err_tx_cons_idx_conflict = 0; 5153 5154 fp->rx_pkts = 0; 5155 fp->err_m_getcl = 0; 5156 fp->err_m_getjcl = 0; 5157 } 5158 return; 5159 } 5160 5161 static void 5162 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5163 { 5164 struct ecore_dev *cdev; 5165 5166 cdev = &ha->cdev; 5167 5168 if (sb_info->sb_virt) { 5169 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5170 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5171 sb_info->sb_virt = NULL; 5172 } 5173 } 5174 5175 static int 5176 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5177 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5178 { 5179 struct ecore_hwfn *p_hwfn; 5180 int hwfn_index, rc; 5181 u16 rel_sb_id; 5182 5183 
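/* Status blocks are spread round-robin across the hw-functions:
 * hwfn_index = sb_id % num_hwfns and rel_sb_id = sb_id / num_hwfns.
 * E.g. with cdev->num_hwfns == 2, sb_id 0,2,4,... land on hwfn 0 and
 * sb_id 1,3,5,... on hwfn 1, with relative indices 0,1,2,... on each. */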
hwfn_index = sb_id % cdev->num_hwfns; 5184 p_hwfn = &cdev->hwfns[hwfn_index]; 5185 rel_sb_id = sb_id / cdev->num_hwfns; 5186 5187 QL_DPRINT2(((qlnx_host_t *)cdev), 5188 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 5189 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5190 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5191 sb_virt_addr, (void *)sb_phy_addr); 5192 5193 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5194 sb_virt_addr, sb_phy_addr, rel_sb_id); 5195 5196 return rc; 5197 } 5198 5199 /* This function allocates fast-path status block memory */ 5200 static int 5201 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5202 { 5203 struct status_block *sb_virt; 5204 bus_addr_t sb_phys; 5205 int rc; 5206 uint32_t size; 5207 struct ecore_dev *cdev; 5208 5209 cdev = &ha->cdev; 5210 5211 size = sizeof(*sb_virt); 5212 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5213 5214 if (!sb_virt) { 5215 QL_DPRINT1(ha, "Status block allocation failed\n"); 5216 return -ENOMEM; 5217 } 5218 5219 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5220 if (rc) { 5221 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5222 } 5223 5224 return rc; 5225 } 5226 5227 static void 5228 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5229 { 5230 int i; 5231 struct sw_rx_data *rx_buf; 5232 5233 for (i = 0; i < rxq->num_rx_buffers; i++) { 5234 5235 rx_buf = &rxq->sw_rx_ring[i]; 5236 5237 if (rx_buf->data != NULL) { 5238 if (rx_buf->map != NULL) { 5239 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5240 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5241 rx_buf->map = NULL; 5242 } 5243 m_freem(rx_buf->data); 5244 rx_buf->data = NULL; 5245 } 5246 } 5247 return; 5248 } 5249 5250 static void 5251 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5252 { 5253 struct ecore_dev *cdev; 5254 int i; 5255 5256 cdev = &ha->cdev; 5257 5258 qlnx_free_rx_buffers(ha, rxq); 5259 5260 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5261 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5262 if (rxq->tpa_info[i].mpf != NULL) 5263 m_freem(rxq->tpa_info[i].mpf); 5264 } 5265 5266 bzero((void *)&rxq->sw_rx_ring[0], 5267 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5268 5269 /* Free the real RQ ring used by FW */ 5270 if (rxq->rx_bd_ring.p_virt_addr) { 5271 ecore_chain_free(cdev, &rxq->rx_bd_ring); 5272 rxq->rx_bd_ring.p_virt_addr = NULL; 5273 } 5274 5275 /* Free the real completion ring used by FW */ 5276 if (rxq->rx_comp_ring.p_virt_addr && 5277 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 5278 ecore_chain_free(cdev, &rxq->rx_comp_ring); 5279 rxq->rx_comp_ring.p_virt_addr = NULL; 5280 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 5281 } 5282 5283 #ifdef QLNX_SOFT_LRO 5284 { 5285 struct lro_ctrl *lro; 5286 5287 lro = &rxq->lro; 5288 tcp_lro_free(lro); 5289 } 5290 #endif /* #ifdef QLNX_SOFT_LRO */ 5291 5292 return; 5293 } 5294 5295 static int 5296 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5297 { 5298 register struct mbuf *mp; 5299 uint16_t rx_buf_size; 5300 struct sw_rx_data *sw_rx_data; 5301 struct eth_rx_bd *rx_bd; 5302 dma_addr_t dma_addr; 5303 bus_dmamap_t map; 5304 bus_dma_segment_t segs[1]; 5305 int nsegs; 5306 int ret; 5307 struct ecore_dev *cdev; 5308 5309 cdev = &ha->cdev; 5310 5311 rx_buf_size = rxq->rx_buf_size; 5312 5313 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5314 5315 if (mp == NULL) { 5316 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 5317 return -ENOMEM; 5318 } 5319 5320 mp->m_len = 
mp->m_pkthdr.len = rx_buf_size; 5321 5322 map = (bus_dmamap_t)0; 5323 5324 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5325 BUS_DMA_NOWAIT); 5326 dma_addr = segs[0].ds_addr; 5327 5328 if (ret || !dma_addr || (nsegs != 1)) { 5329 m_freem(mp); 5330 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5331 ret, (long long unsigned int)dma_addr, nsegs); 5332 return -ENOMEM; 5333 } 5334 5335 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5336 sw_rx_data->data = mp; 5337 sw_rx_data->dma_addr = dma_addr; 5338 sw_rx_data->map = map; 5339 5340 /* Advance PROD and get BD pointer */ 5341 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 5342 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 5343 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 5344 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5345 5346 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5347 5348 return 0; 5349 } 5350 5351 static int 5352 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 5353 struct qlnx_agg_info *tpa) 5354 { 5355 struct mbuf *mp; 5356 dma_addr_t dma_addr; 5357 bus_dmamap_t map; 5358 bus_dma_segment_t segs[1]; 5359 int nsegs; 5360 int ret; 5361 struct sw_rx_data *rx_buf; 5362 5363 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5364 5365 if (mp == NULL) { 5366 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 5367 return -ENOMEM; 5368 } 5369 5370 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5371 5372 map = (bus_dmamap_t)0; 5373 5374 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5375 BUS_DMA_NOWAIT); 5376 dma_addr = segs[0].ds_addr; 5377 5378 if (ret || !dma_addr || (nsegs != 1)) { 5379 m_freem(mp); 5380 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5381 ret, (long long unsigned int)dma_addr, nsegs); 5382 return -ENOMEM; 5383 } 5384 5385 rx_buf = &tpa->rx_buf; 5386 5387 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 5388 5389 rx_buf->data = mp; 5390 rx_buf->dma_addr = dma_addr; 5391 rx_buf->map = map; 5392 5393 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5394 5395 return (0); 5396 } 5397 5398 static void 5399 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 5400 { 5401 struct sw_rx_data *rx_buf; 5402 5403 rx_buf = &tpa->rx_buf; 5404 5405 if (rx_buf->data != NULL) { 5406 if (rx_buf->map != NULL) { 5407 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5408 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5409 rx_buf->map = NULL; 5410 } 5411 m_freem(rx_buf->data); 5412 rx_buf->data = NULL; 5413 } 5414 return; 5415 } 5416 5417 /* This function allocates all memory needed per Rx queue */ 5418 static int 5419 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5420 { 5421 int i, rc, num_allocated; 5422 struct ifnet *ifp; 5423 struct ecore_dev *cdev; 5424 5425 cdev = &ha->cdev; 5426 ifp = ha->ifp; 5427 5428 rxq->num_rx_buffers = RX_RING_SIZE; 5429 5430 rxq->rx_buf_size = ha->rx_buf_size; 5431 5432 /* Allocate the parallel driver ring for Rx buffers */ 5433 bzero((void *)&rxq->sw_rx_ring[0], 5434 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5435 5436 /* Allocate FW Rx ring */ 5437 5438 rc = ecore_chain_alloc(cdev, 5439 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5440 ECORE_CHAIN_MODE_NEXT_PTR, 5441 ECORE_CHAIN_CNT_TYPE_U16, 5442 RX_RING_SIZE, 5443 sizeof(struct eth_rx_bd), 5444 &rxq->rx_bd_ring, NULL); 5445 5446 if (rc) 5447 goto err; 5448 5449 /* Allocate FW completion ring */ 5450 rc = ecore_chain_alloc(cdev, 5451 ECORE_CHAIN_USE_TO_CONSUME, 5452 ECORE_CHAIN_MODE_PBL, 5453 ECORE_CHAIN_CNT_TYPE_U16, 
5454 RX_RING_SIZE, 5455 sizeof(union eth_rx_cqe), 5456 &rxq->rx_comp_ring, NULL); 5457 5458 if (rc) 5459 goto err; 5460 5461 /* Allocate buffers for the Rx ring */ 5462 5463 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5464 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 5465 &rxq->tpa_info[i]); 5466 if (rc) 5467 break; 5468 5469 } 5470 5471 for (i = 0; i < rxq->num_rx_buffers; i++) { 5472 rc = qlnx_alloc_rx_buffer(ha, rxq); 5473 if (rc) 5474 break; 5475 } 5476 num_allocated = i; 5477 if (!num_allocated) { 5478 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 5479 goto err; 5480 } else if (num_allocated < rxq->num_rx_buffers) { 5481 QL_DPRINT1(ha, "Allocated less buffers than" 5482 " desired (%d allocated)\n", num_allocated); 5483 } 5484 5485 #ifdef QLNX_SOFT_LRO 5486 5487 { 5488 struct lro_ctrl *lro; 5489 5490 lro = &rxq->lro; 5491 5492 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5493 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 5494 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 5495 rxq->rxq_id); 5496 goto err; 5497 } 5498 #else 5499 if (tcp_lro_init(lro)) { 5500 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 5501 rxq->rxq_id); 5502 goto err; 5503 } 5504 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5505 5506 lro->ifp = ha->ifp; 5507 } 5508 #endif /* #ifdef QLNX_SOFT_LRO */ 5509 return 0; 5510 5511 err: 5512 qlnx_free_mem_rxq(ha, rxq); 5513 return -ENOMEM; 5514 } 5515 5516 5517 static void 5518 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5519 struct qlnx_tx_queue *txq) 5520 { 5521 struct ecore_dev *cdev; 5522 5523 cdev = &ha->cdev; 5524 5525 bzero((void *)&txq->sw_tx_ring[0], 5526 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5527 5528 /* Free the real RQ ring used by FW */ 5529 if (txq->tx_pbl.p_virt_addr) { 5530 ecore_chain_free(cdev, &txq->tx_pbl); 5531 txq->tx_pbl.p_virt_addr = NULL; 5532 } 5533 return; 5534 } 5535 5536 /* This function allocates all memory needed per Tx queue */ 5537 static int 5538 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5539 struct qlnx_tx_queue *txq) 5540 { 5541 int ret = ECORE_SUCCESS; 5542 union eth_tx_bd_types *p_virt; 5543 struct ecore_dev *cdev; 5544 5545 cdev = &ha->cdev; 5546 5547 bzero((void *)&txq->sw_tx_ring[0], 5548 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5549 5550 /* Allocate the real Tx ring to be used by FW */ 5551 ret = ecore_chain_alloc(cdev, 5552 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5553 ECORE_CHAIN_MODE_PBL, 5554 ECORE_CHAIN_CNT_TYPE_U16, 5555 TX_RING_SIZE, 5556 sizeof(*p_virt), 5557 &txq->tx_pbl, NULL); 5558 5559 if (ret != ECORE_SUCCESS) { 5560 goto err; 5561 } 5562 5563 txq->num_tx_buffers = TX_RING_SIZE; 5564 5565 return 0; 5566 5567 err: 5568 qlnx_free_mem_txq(ha, fp, txq); 5569 return -ENOMEM; 5570 } 5571 5572 static void 5573 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5574 { 5575 struct mbuf *mp; 5576 struct ifnet *ifp = ha->ifp; 5577 5578 if (mtx_initialized(&fp->tx_mtx)) { 5579 5580 if (fp->tx_br != NULL) { 5581 5582 mtx_lock(&fp->tx_mtx); 5583 5584 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 5585 fp->tx_pkts_freed++; 5586 m_freem(mp); 5587 } 5588 5589 mtx_unlock(&fp->tx_mtx); 5590 5591 buf_ring_free(fp->tx_br, M_DEVBUF); 5592 fp->tx_br = NULL; 5593 } 5594 mtx_destroy(&fp->tx_mtx); 5595 } 5596 return; 5597 } 5598 5599 static void 5600 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5601 { 5602 int tc; 5603 5604 qlnx_free_mem_sb(ha, fp->sb_info); 5605 5606 qlnx_free_mem_rxq(ha, fp->rxq); 5607 5608 for 
(tc = 0; tc < ha->num_tc; tc++) 5609 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 5610 5611 return; 5612 } 5613 5614 static int 5615 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5616 { 5617 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 5618 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 5619 5620 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 5621 5622 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 5623 M_NOWAIT, &fp->tx_mtx); 5624 if (fp->tx_br == NULL) { 5625 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 5626 ha->dev_unit, fp->rss_id); 5627 return -ENOMEM; 5628 } 5629 return 0; 5630 } 5631 5632 static int 5633 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 5634 { 5635 int rc, tc; 5636 5637 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 5638 if (rc) 5639 goto err; 5640 5641 if (ha->rx_jumbo_buf_eq_mtu) { 5642 if (ha->max_frame_size <= MCLBYTES) 5643 ha->rx_buf_size = MCLBYTES; 5644 else if (ha->max_frame_size <= MJUMPAGESIZE) 5645 ha->rx_buf_size = MJUMPAGESIZE; 5646 else if (ha->max_frame_size <= MJUM9BYTES) 5647 ha->rx_buf_size = MJUM9BYTES; 5648 else if (ha->max_frame_size <= MJUM16BYTES) 5649 ha->rx_buf_size = MJUM16BYTES; 5650 } else { 5651 if (ha->max_frame_size <= MCLBYTES) 5652 ha->rx_buf_size = MCLBYTES; 5653 else 5654 ha->rx_buf_size = MJUMPAGESIZE; 5655 } 5656 5657 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 5658 if (rc) 5659 goto err; 5660 5661 for (tc = 0; tc < ha->num_tc; tc++) { 5662 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 5663 if (rc) 5664 goto err; 5665 } 5666 5667 return 0; 5668 5669 err: 5670 qlnx_free_mem_fp(ha, fp); 5671 return -ENOMEM; 5672 } 5673 5674 static void 5675 qlnx_free_mem_load(qlnx_host_t *ha) 5676 { 5677 int i; 5678 struct ecore_dev *cdev; 5679 5680 cdev = &ha->cdev; 5681 5682 for (i = 0; i < ha->num_rss; i++) { 5683 struct qlnx_fastpath *fp = &ha->fp_array[i]; 5684 5685 qlnx_free_mem_fp(ha, fp); 5686 } 5687 return; 5688 } 5689 5690 static int 5691 qlnx_alloc_mem_load(qlnx_host_t *ha) 5692 { 5693 int rc = 0, rss_id; 5694 5695 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5696 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5697 5698 rc = qlnx_alloc_mem_fp(ha, fp); 5699 if (rc) 5700 break; 5701 } 5702 return (rc); 5703 } 5704 5705 static int 5706 qlnx_start_vport(struct ecore_dev *cdev, 5707 u8 vport_id, 5708 u16 mtu, 5709 u8 drop_ttl0_flg, 5710 u8 inner_vlan_removal_en_flg, 5711 u8 tx_switching, 5712 u8 hw_lro_enable) 5713 { 5714 int rc, i; 5715 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 5716 qlnx_host_t *ha; 5717 5718 ha = (qlnx_host_t *)cdev; 5719 5720 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 5721 vport_start_params.tx_switching = 0; 5722 vport_start_params.handle_ptp_pkts = 0; 5723 vport_start_params.only_untagged = 0; 5724 vport_start_params.drop_ttl0 = drop_ttl0_flg; 5725 5726 vport_start_params.tpa_mode = 5727 (hw_lro_enable ? 
ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 5728 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 5729 5730 vport_start_params.vport_id = vport_id; 5731 vport_start_params.mtu = mtu; 5732 5733 5734 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 5735 5736 for_each_hwfn(cdev, i) { 5737 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5738 5739 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 5740 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5741 5742 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 5743 5744 if (rc) { 5745 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 5746 " with MTU %d\n" , vport_id, mtu); 5747 return -ENOMEM; 5748 } 5749 5750 ecore_hw_start_fastpath(p_hwfn); 5751 5752 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 5753 vport_id, mtu); 5754 } 5755 return 0; 5756 } 5757 5758 5759 static int 5760 qlnx_update_vport(struct ecore_dev *cdev, 5761 struct qlnx_update_vport_params *params) 5762 { 5763 struct ecore_sp_vport_update_params sp_params; 5764 int rc, i, j, fp_index; 5765 struct ecore_hwfn *p_hwfn; 5766 struct ecore_rss_params *rss; 5767 qlnx_host_t *ha = (qlnx_host_t *)cdev; 5768 struct qlnx_fastpath *fp; 5769 5770 memset(&sp_params, 0, sizeof(sp_params)); 5771 /* Translate protocol params into sp params */ 5772 sp_params.vport_id = params->vport_id; 5773 5774 sp_params.update_vport_active_rx_flg = 5775 params->update_vport_active_rx_flg; 5776 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 5777 5778 sp_params.update_vport_active_tx_flg = 5779 params->update_vport_active_tx_flg; 5780 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 5781 5782 sp_params.update_inner_vlan_removal_flg = 5783 params->update_inner_vlan_removal_flg; 5784 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 5785 5786 sp_params.sge_tpa_params = params->sge_tpa_params; 5787 5788 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 5789 * We need to re-fix the rss values per engine for CMT. 
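 * For example, with num_hwfns == 2 and num_rss == 4, indirection-table
 * entry j on engine i is re-pointed at fp ((2 * j) + i) % 4, so engine 0
 * cycles through fps 0,2,0,2,... and engine 1 through fps 1,3,1,3,...
 * (see the fp_index computation below).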
5790 */ 5791 if (params->rss_params->update_rss_config) 5792 sp_params.rss_params = params->rss_params; 5793 else 5794 sp_params.rss_params = NULL; 5795 5796 for_each_hwfn(cdev, i) { 5797 5798 p_hwfn = &cdev->hwfns[i]; 5799 5800 if ((cdev->num_hwfns > 1) && 5801 params->rss_params->update_rss_config && 5802 params->rss_params->rss_enable) { 5803 5804 rss = params->rss_params; 5805 5806 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 5807 5808 fp_index = ((cdev->num_hwfns * j) + i) % 5809 ha->num_rss; 5810 5811 fp = &ha->fp_array[fp_index]; 5812 rss->rss_ind_table[j] = fp->rxq->handle; 5813 } 5814 5815 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 5816 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 5817 rss->rss_ind_table[j], 5818 rss->rss_ind_table[j+1], 5819 rss->rss_ind_table[j+2], 5820 rss->rss_ind_table[j+3], 5821 rss->rss_ind_table[j+4], 5822 rss->rss_ind_table[j+5], 5823 rss->rss_ind_table[j+6], 5824 rss->rss_ind_table[j+7]); 5825 j += 8; 5826 } 5827 } 5828 5829 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 5830 5831 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 5832 5833 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 5834 ECORE_SPQ_MODE_EBLOCK, NULL); 5835 if (rc) { 5836 QL_DPRINT1(ha, "Failed to update VPORT\n"); 5837 return rc; 5838 } 5839 5840 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 5841 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 5842 params->vport_id, params->vport_active_tx_flg, 5843 params->vport_active_rx_flg, 5844 params->update_vport_active_tx_flg, 5845 params->update_vport_active_rx_flg); 5846 } 5847 5848 return 0; 5849 } 5850 5851 static void 5852 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 5853 { 5854 struct eth_rx_bd *rx_bd_cons = 5855 ecore_chain_consume(&rxq->rx_bd_ring); 5856 struct eth_rx_bd *rx_bd_prod = 5857 ecore_chain_produce(&rxq->rx_bd_ring); 5858 struct sw_rx_data *sw_rx_data_cons = 5859 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 5860 struct sw_rx_data *sw_rx_data_prod = 5861 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5862 5863 sw_rx_data_prod->data = sw_rx_data_cons->data; 5864 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 5865 5866 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 5867 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5868 5869 return; 5870 } 5871 5872 static void 5873 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 5874 { 5875 5876 uint16_t bd_prod; 5877 uint16_t cqe_prod; 5878 union { 5879 struct eth_rx_prod_data rx_prod_data; 5880 uint32_t data32; 5881 } rx_prods; 5882 5883 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 5884 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 5885 5886 /* Update producers */ 5887 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 5888 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 5889 5890 /* Make sure that the BD and SGE data is updated before updating the 5891 * producers since FW might read the BD/SGE right after the producer 5892 * is updated. 5893 */ 5894 wmb(); 5895 5896 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 5897 sizeof(rx_prods), &rx_prods.data32); 5898 5899 /* mmiowb is needed to synchronize doorbell writes from more than one 5900 * processor. It guarantees that the write arrives to the device before 5901 * the napi lock is released and another qlnx_poll is called (possibly 5902 * on another CPU). Without this barrier, the next doorbell can bypass 5903 * this doorbell. This is applicable to IA64/Altix systems. 
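 * FreeBSD has no mmiowb() primitive; the second wmb() below is used to
 * provide that ordering here.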
5904 */ 5905 wmb(); 5906 5907 return; 5908 } 5909 5910 static uint32_t qlnx_hash_key[] = { 5911 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 5912 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 5913 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 5914 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 5915 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 5916 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 5917 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 5918 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 5919 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 5920 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 5921 5922 static int 5923 qlnx_start_queues(qlnx_host_t *ha) 5924 { 5925 int rc, tc, i, vport_id = 0, 5926 drop_ttl0_flg = 1, vlan_removal_en = 1, 5927 tx_switching = 0, hw_lro_enable = 0; 5928 struct ecore_dev *cdev = &ha->cdev; 5929 struct ecore_rss_params *rss_params = &ha->rss_params; 5930 struct qlnx_update_vport_params vport_update_params; 5931 struct ifnet *ifp; 5932 struct ecore_hwfn *p_hwfn; 5933 struct ecore_sge_tpa_params tpa_params; 5934 struct ecore_queue_start_common_params qparams; 5935 struct qlnx_fastpath *fp; 5936 5937 ifp = ha->ifp; 5938 5939 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 5940 5941 if (!ha->num_rss) { 5942 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 5943 " are no Rx queues\n"); 5944 return -EINVAL; 5945 } 5946 5947 #ifndef QLNX_SOFT_LRO 5948 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 5949 #endif /* #ifndef QLNX_SOFT_LRO */ 5950 5951 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 5952 vlan_removal_en, tx_switching, hw_lro_enable); 5953 5954 if (rc) { 5955 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 5956 return rc; 5957 } 5958 5959 QL_DPRINT2(ha, "Start vport ramrod passed, " 5960 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 5961 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 5962 5963 for_each_rss(i) { 5964 struct ecore_rxq_start_ret_params rx_ret_params; 5965 struct ecore_txq_start_ret_params tx_ret_params; 5966 5967 fp = &ha->fp_array[i]; 5968 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 5969 5970 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 5971 bzero(&rx_ret_params, 5972 sizeof (struct ecore_rxq_start_ret_params)); 5973 5974 qparams.queue_id = i ; 5975 qparams.vport_id = vport_id; 5976 qparams.stats_id = vport_id; 5977 qparams.p_sb = fp->sb_info; 5978 qparams.sb_idx = RX_PI; 5979 5980 5981 rc = ecore_eth_rx_queue_start(p_hwfn, 5982 p_hwfn->hw_info.opaque_fid, 5983 &qparams, 5984 fp->rxq->rx_buf_size, /* bd_max_bytes */ 5985 /* bd_chain_phys_addr */ 5986 fp->rxq->rx_bd_ring.p_phys_addr, 5987 /* cqe_pbl_addr */ 5988 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 5989 /* cqe_pbl_size */ 5990 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 5991 &rx_ret_params); 5992 5993 if (rc) { 5994 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 5995 return rc; 5996 } 5997 5998 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 5999 fp->rxq->handle = rx_ret_params.p_handle; 6000 fp->rxq->hw_cons_ptr = 6001 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6002 6003 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6004 6005 for (tc = 0; tc < ha->num_tc; tc++) { 6006 struct qlnx_tx_queue *txq = fp->txq[tc]; 6007 6008 bzero(&qparams, 6009 sizeof(struct ecore_queue_start_common_params)); 6010 bzero(&tx_ret_params, 6011 sizeof (struct ecore_txq_start_ret_params)); 6012 6013 qparams.queue_id = txq->index / cdev->num_hwfns ; 6014 qparams.vport_id = vport_id; 6015 qparams.stats_id = vport_id; 6016 qparams.p_sb = 
fp->sb_info; 6017 qparams.sb_idx = TX_PI(tc); 6018 6019 rc = ecore_eth_tx_queue_start(p_hwfn, 6020 p_hwfn->hw_info.opaque_fid, 6021 &qparams, tc, 6022 /* bd_chain_phys_addr */ 6023 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6024 ecore_chain_get_page_cnt(&txq->tx_pbl), 6025 &tx_ret_params); 6026 6027 if (rc) { 6028 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6029 txq->index, rc); 6030 return rc; 6031 } 6032 6033 txq->doorbell_addr = tx_ret_params.p_doorbell; 6034 txq->handle = tx_ret_params.p_handle; 6035 6036 txq->hw_cons_ptr = 6037 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6038 SET_FIELD(txq->tx_db.data.params, 6039 ETH_DB_DATA_DEST, DB_DEST_XCM); 6040 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6041 DB_AGG_CMD_SET); 6042 SET_FIELD(txq->tx_db.data.params, 6043 ETH_DB_DATA_AGG_VAL_SEL, 6044 DQ_XCM_ETH_TX_BD_PROD_CMD); 6045 6046 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6047 } 6048 } 6049 6050 /* Fill struct with RSS params */ 6051 if (ha->num_rss > 1) { 6052 6053 rss_params->update_rss_config = 1; 6054 rss_params->rss_enable = 1; 6055 rss_params->update_rss_capabilities = 1; 6056 rss_params->update_rss_ind_table = 1; 6057 rss_params->update_rss_key = 1; 6058 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6059 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6060 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6061 6062 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6063 fp = &ha->fp_array[(i % ha->num_rss)]; 6064 rss_params->rss_ind_table[i] = fp->rxq->handle; 6065 } 6066 6067 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6068 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6069 6070 } else { 6071 memset(rss_params, 0, sizeof(*rss_params)); 6072 } 6073 6074 6075 /* Prepare and send the vport enable */ 6076 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6077 vport_update_params.vport_id = vport_id; 6078 vport_update_params.update_vport_active_tx_flg = 1; 6079 vport_update_params.vport_active_tx_flg = 1; 6080 vport_update_params.update_vport_active_rx_flg = 1; 6081 vport_update_params.vport_active_rx_flg = 1; 6082 vport_update_params.rss_params = rss_params; 6083 vport_update_params.update_inner_vlan_removal_flg = 1; 6084 vport_update_params.inner_vlan_removal_flg = 1; 6085 6086 if (hw_lro_enable) { 6087 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6088 6089 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6090 6091 tpa_params.update_tpa_en_flg = 1; 6092 tpa_params.tpa_ipv4_en_flg = 1; 6093 tpa_params.tpa_ipv6_en_flg = 1; 6094 6095 tpa_params.update_tpa_param_flg = 1; 6096 tpa_params.tpa_pkt_split_flg = 0; 6097 tpa_params.tpa_hdr_data_split_flg = 0; 6098 tpa_params.tpa_gro_consistent_flg = 0; 6099 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6100 tpa_params.tpa_max_size = (uint16_t)(-1); 6101 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6102 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6103 6104 vport_update_params.sge_tpa_params = &tpa_params; 6105 } 6106 6107 rc = qlnx_update_vport(cdev, &vport_update_params); 6108 if (rc) { 6109 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6110 return rc; 6111 } 6112 6113 return 0; 6114 } 6115 6116 static int 6117 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6118 struct qlnx_tx_queue *txq) 6119 { 6120 uint16_t hw_bd_cons; 6121 uint16_t ecore_cons_idx; 6122 6123 QL_DPRINT2(ha, "enter\n"); 6124 6125 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6126 6127 while (hw_bd_cons != 6128 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6129 6130 
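/* Reap completed Tx buffers under the per-fp tx lock, then delay briefly
 * (qlnx_mdelay) before re-reading the hardware consumer index. */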
mtx_lock(&fp->tx_mtx); 6131 6132 (void)qlnx_tx_int(ha, fp, txq); 6133 6134 mtx_unlock(&fp->tx_mtx); 6135 6136 qlnx_mdelay(__func__, 2); 6137 6138 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6139 } 6140 6141 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6142 6143 return 0; 6144 } 6145 6146 static int 6147 qlnx_stop_queues(qlnx_host_t *ha) 6148 { 6149 struct qlnx_update_vport_params vport_update_params; 6150 struct ecore_dev *cdev; 6151 struct qlnx_fastpath *fp; 6152 int rc, tc, i; 6153 6154 cdev = &ha->cdev; 6155 6156 /* Disable the vport */ 6157 6158 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6159 6160 vport_update_params.vport_id = 0; 6161 vport_update_params.update_vport_active_tx_flg = 1; 6162 vport_update_params.vport_active_tx_flg = 0; 6163 vport_update_params.update_vport_active_rx_flg = 1; 6164 vport_update_params.vport_active_rx_flg = 0; 6165 vport_update_params.rss_params = &ha->rss_params; 6166 vport_update_params.rss_params->update_rss_config = 0; 6167 vport_update_params.rss_params->rss_enable = 0; 6168 vport_update_params.update_inner_vlan_removal_flg = 0; 6169 vport_update_params.inner_vlan_removal_flg = 0; 6170 6171 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6172 6173 rc = qlnx_update_vport(cdev, &vport_update_params); 6174 if (rc) { 6175 QL_DPRINT1(ha, "Failed to update vport\n"); 6176 return rc; 6177 } 6178 6179 /* Flush Tx queues. If needed, request drain from MCP */ 6180 for_each_rss(i) { 6181 fp = &ha->fp_array[i]; 6182 6183 for (tc = 0; tc < ha->num_tc; tc++) { 6184 struct qlnx_tx_queue *txq = fp->txq[tc]; 6185 6186 rc = qlnx_drain_txq(ha, fp, txq); 6187 if (rc) 6188 return rc; 6189 } 6190 } 6191 6192 /* Stop all Queues in reverse order*/ 6193 for (i = ha->num_rss - 1; i >= 0; i--) { 6194 6195 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6196 6197 fp = &ha->fp_array[i]; 6198 6199 /* Stop the Tx Queue(s)*/ 6200 for (tc = 0; tc < ha->num_tc; tc++) { 6201 int tx_queue_id; 6202 6203 tx_queue_id = tc * ha->num_rss + i; 6204 rc = ecore_eth_tx_queue_stop(p_hwfn, 6205 fp->txq[tc]->handle); 6206 6207 if (rc) { 6208 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 6209 tx_queue_id); 6210 return rc; 6211 } 6212 } 6213 6214 /* Stop the Rx Queue*/ 6215 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6216 false); 6217 if (rc) { 6218 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 6219 return rc; 6220 } 6221 } 6222 6223 /* Stop the vport */ 6224 for_each_hwfn(cdev, i) { 6225 6226 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6227 6228 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6229 6230 if (rc) { 6231 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 6232 return rc; 6233 } 6234 } 6235 6236 return rc; 6237 } 6238 6239 static int 6240 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6241 enum ecore_filter_opcode opcode, 6242 unsigned char mac[ETH_ALEN]) 6243 { 6244 struct ecore_filter_ucast ucast; 6245 struct ecore_dev *cdev; 6246 int rc; 6247 6248 cdev = &ha->cdev; 6249 6250 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6251 6252 ucast.opcode = opcode; 6253 ucast.type = ECORE_FILTER_MAC; 6254 ucast.is_rx_filter = 1; 6255 ucast.vport_to_add_to = 0; 6256 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6257 6258 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6259 6260 return (rc); 6261 } 6262 6263 static int 6264 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6265 { 6266 struct ecore_filter_ucast ucast; 6267 struct ecore_dev *cdev; 6268 int rc; 6269 6270 bzero(&ucast, sizeof(struct 
ecore_filter_ucast)); 6271 6272 ucast.opcode = ECORE_FILTER_REPLACE; 6273 ucast.type = ECORE_FILTER_MAC; 6274 ucast.is_rx_filter = 1; 6275 6276 cdev = &ha->cdev; 6277 6278 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6279 6280 return (rc); 6281 } 6282 6283 static int 6284 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6285 { 6286 struct ecore_filter_mcast *mcast; 6287 struct ecore_dev *cdev; 6288 int rc, i; 6289 6290 cdev = &ha->cdev; 6291 6292 mcast = &ha->ecore_mcast; 6293 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6294 6295 mcast->opcode = ECORE_FILTER_REMOVE; 6296 6297 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 6298 6299 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 6300 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 6301 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 6302 6303 memcpy(&mcast->mac[i], &ha->mcast[i].addr[0], ETH_ALEN); 6304 mcast->num_mc_addrs++; 6305 } 6306 } 6307 mcast = &ha->ecore_mcast; 6308 6309 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 6310 6311 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 6312 ha->nmcast = 0; 6313 6314 return (rc); 6315 } 6316 6317 static int 6318 qlnx_clean_filters(qlnx_host_t *ha) 6319 { 6320 int rc = 0; 6321 6322 /* Remove all unicast macs */ 6323 rc = qlnx_remove_all_ucast_mac(ha); 6324 if (rc) 6325 return rc; 6326 6327 /* Remove all multicast macs */ 6328 rc = qlnx_remove_all_mcast_mac(ha); 6329 if (rc) 6330 return rc; 6331 6332 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 6333 6334 return (rc); 6335 } 6336 6337 static int 6338 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 6339 { 6340 struct ecore_filter_accept_flags accept; 6341 int rc = 0; 6342 struct ecore_dev *cdev; 6343 6344 cdev = &ha->cdev; 6345 6346 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 6347 6348 accept.update_rx_mode_config = 1; 6349 accept.rx_accept_filter = filter; 6350 6351 accept.update_tx_mode_config = 1; 6352 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 6353 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 6354 6355 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 6356 ECORE_SPQ_MODE_CB, NULL); 6357 6358 return (rc); 6359 } 6360 6361 static int 6362 qlnx_set_rx_mode(qlnx_host_t *ha) 6363 { 6364 int rc = 0; 6365 uint8_t filter; 6366 6367 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 6368 if (rc) 6369 return rc; 6370 6371 rc = qlnx_remove_all_mcast_mac(ha); 6372 if (rc) 6373 return rc; 6374 6375 filter = ECORE_ACCEPT_UCAST_MATCHED | 6376 ECORE_ACCEPT_MCAST_MATCHED | 6377 ECORE_ACCEPT_BCAST; 6378 ha->filter = filter; 6379 6380 rc = qlnx_set_rx_accept_filter(ha, filter); 6381 6382 return (rc); 6383 } 6384 6385 static int 6386 qlnx_set_link(qlnx_host_t *ha, bool link_up) 6387 { 6388 int i, rc = 0; 6389 struct ecore_dev *cdev; 6390 struct ecore_hwfn *hwfn; 6391 struct ecore_ptt *ptt; 6392 6393 cdev = &ha->cdev; 6394 6395 for_each_hwfn(cdev, i) { 6396 6397 hwfn = &cdev->hwfns[i]; 6398 6399 ptt = ecore_ptt_acquire(hwfn); 6400 if (!ptt) 6401 return -EBUSY; 6402 6403 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 6404 6405 ecore_ptt_release(hwfn, ptt); 6406 6407 if (rc) 6408 return rc; 6409 } 6410 return (rc); 6411 } 6412 6413 #if __FreeBSD_version >= 1100000 6414 static uint64_t 6415 qlnx_get_counter(if_t ifp, ift_counter cnt) 6416 { 6417 qlnx_host_t *ha; 6418 uint64_t count; 6419 6420 ha = (qlnx_host_t *)if_getsoftc(ifp); 6421 6422 switch (cnt) { 6423 6424 case IFCOUNTER_IPACKETS: 6425 count 
= ha->hw_stats.common.rx_ucast_pkts + 6426 ha->hw_stats.common.rx_mcast_pkts + 6427 ha->hw_stats.common.rx_bcast_pkts; 6428 break; 6429 6430 case IFCOUNTER_IERRORS: 6431 count = ha->hw_stats.common.rx_crc_errors + 6432 ha->hw_stats.common.rx_align_errors + 6433 ha->hw_stats.common.rx_oversize_packets + 6434 ha->hw_stats.common.rx_undersize_packets; 6435 break; 6436 6437 case IFCOUNTER_OPACKETS: 6438 count = ha->hw_stats.common.tx_ucast_pkts + 6439 ha->hw_stats.common.tx_mcast_pkts + 6440 ha->hw_stats.common.tx_bcast_pkts; 6441 break; 6442 6443 case IFCOUNTER_OERRORS: 6444 count = ha->hw_stats.common.tx_err_drop_pkts; 6445 break; 6446 6447 case IFCOUNTER_COLLISIONS: 6448 return (0); 6449 6450 case IFCOUNTER_IBYTES: 6451 count = ha->hw_stats.common.rx_ucast_bytes + 6452 ha->hw_stats.common.rx_mcast_bytes + 6453 ha->hw_stats.common.rx_bcast_bytes; 6454 break; 6455 6456 case IFCOUNTER_OBYTES: 6457 count = ha->hw_stats.common.tx_ucast_bytes + 6458 ha->hw_stats.common.tx_mcast_bytes + 6459 ha->hw_stats.common.tx_bcast_bytes; 6460 break; 6461 6462 case IFCOUNTER_IMCASTS: 6463 count = ha->hw_stats.common.rx_mcast_bytes; 6464 break; 6465 6466 case IFCOUNTER_OMCASTS: 6467 count = ha->hw_stats.common.tx_mcast_bytes; 6468 break; 6469 6470 case IFCOUNTER_IQDROPS: 6471 case IFCOUNTER_OQDROPS: 6472 case IFCOUNTER_NOPROTO: 6473 6474 default: 6475 return (if_get_counter_default(ifp, cnt)); 6476 } 6477 return (count); 6478 } 6479 #endif 6480 6481 6482 static void 6483 qlnx_timer(void *arg) 6484 { 6485 qlnx_host_t *ha; 6486 6487 ha = (qlnx_host_t *)arg; 6488 6489 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 6490 6491 if (ha->storm_stats_enable) 6492 qlnx_sample_storm_stats(ha); 6493 6494 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 6495 6496 return; 6497 } 6498 6499 static int 6500 qlnx_load(qlnx_host_t *ha) 6501 { 6502 int i; 6503 int rc = 0; 6504 struct ecore_dev *cdev; 6505 device_t dev; 6506 6507 cdev = &ha->cdev; 6508 dev = ha->pci_dev; 6509 6510 QL_DPRINT2(ha, "enter\n"); 6511 6512 rc = qlnx_alloc_mem_arrays(ha); 6513 if (rc) 6514 goto qlnx_load_exit0; 6515 6516 qlnx_init_fp(ha); 6517 6518 rc = qlnx_alloc_mem_load(ha); 6519 if (rc) 6520 goto qlnx_load_exit1; 6521 6522 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 6523 ha->num_rss, ha->num_tc); 6524 6525 for (i = 0; i < ha->num_rss; i++) { 6526 6527 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 6528 (INTR_TYPE_NET | INTR_MPSAFE), 6529 NULL, qlnx_fp_isr, &ha->irq_vec[i], 6530 &ha->irq_vec[i].handle))) { 6531 6532 QL_DPRINT1(ha, "could not setup interrupt\n"); 6533 goto qlnx_load_exit2; 6534 } 6535 6536 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 6537 irq %p handle %p\n", i, 6538 ha->irq_vec[i].irq_rid, 6539 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 6540 6541 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 6542 } 6543 6544 rc = qlnx_start_queues(ha); 6545 if (rc) 6546 goto qlnx_load_exit2; 6547 6548 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); 6549 6550 /* Add primary mac and set Rx filters */ 6551 rc = qlnx_set_rx_mode(ha); 6552 if (rc) 6553 goto qlnx_load_exit2; 6554 6555 /* Ask for link-up using current configuration */ 6556 qlnx_set_link(ha, true); 6557 6558 ha->state = QLNX_STATE_OPEN; 6559 6560 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 6561 6562 if (ha->flags.callout_init) 6563 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 6564 6565 goto qlnx_load_exit0; 6566 6567 qlnx_load_exit2: 6568 qlnx_free_mem_load(ha); 6569 6570 qlnx_load_exit1: 6571 ha->num_rss = 0; 6572 6573 qlnx_load_exit0: 
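/* Common exit: the error labels above fall through to here after undoing
 * their work (qlnx_load_exit2 frees the per-queue memory, qlnx_load_exit1
 * clears num_rss); only the return code is logged below. */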

static int
qlnx_load(qlnx_host_t *ha)
{
    int i;
    int rc = 0;
    struct ecore_dev *cdev;
    device_t dev;

    cdev = &ha->cdev;
    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    rc = qlnx_alloc_mem_arrays(ha);
    if (rc)
        goto qlnx_load_exit0;

    qlnx_init_fp(ha);

    rc = qlnx_alloc_mem_load(ha);
    if (rc)
        goto qlnx_load_exit1;

    QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
        ha->num_rss, ha->num_tc);

    for (i = 0; i < ha->num_rss; i++) {

        if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
                (INTR_TYPE_NET | INTR_MPSAFE),
                NULL, qlnx_fp_isr, &ha->irq_vec[i],
                &ha->irq_vec[i].handle))) {

            QL_DPRINT1(ha, "could not setup interrupt\n");
            goto qlnx_load_exit2;
        }

        QL_DPRINT2(ha,
            "rss_id = %d irq_rid %d irq %p handle %p\n", i,
            ha->irq_vec[i].irq_rid,
            ha->irq_vec[i].irq, ha->irq_vec[i].handle);

        bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
    }

    rc = qlnx_start_queues(ha);
    if (rc)
        goto qlnx_load_exit2;

    QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");

    /* Add primary MAC and set Rx filters */
    rc = qlnx_set_rx_mode(ha);
    if (rc)
        goto qlnx_load_exit2;

    /* Ask for link-up using current configuration */
    qlnx_set_link(ha, true);

    ha->state = QLNX_STATE_OPEN;

    bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));

    if (ha->flags.callout_init)
        callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

    goto qlnx_load_exit0;

qlnx_load_exit2:
    qlnx_free_mem_load(ha);

qlnx_load_exit1:
    ha->num_rss = 0;

qlnx_load_exit0:
    QL_DPRINT2(ha, "exit [%d]\n", rc);
    return rc;
}

static void
qlnx_drain_soft_lro(qlnx_host_t *ha)
{
#ifdef QLNX_SOFT_LRO

    struct ifnet *ifp;
    int i;

    ifp = ha->ifp;

    if (ifp->if_capenable & IFCAP_LRO) {

        for (i = 0; i < ha->num_rss; i++) {

            struct qlnx_fastpath *fp = &ha->fp_array[i];
            struct lro_ctrl *lro;

            lro = &fp->rxq->lro;

#if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)

            tcp_lro_flush_all(lro);

#else
            struct lro_entry *queued;

            while (!SLIST_EMPTY(&lro->lro_active)) {
                queued = SLIST_FIRST(&lro->lro_active);
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
            }

#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */

        }
    }

#endif /* #ifdef QLNX_SOFT_LRO */

    return;
}

static void
qlnx_unload(qlnx_host_t *ha)
{
    struct ecore_dev *cdev;
    device_t dev;
    int i;

    cdev = &ha->cdev;
    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");
    QL_DPRINT1(ha, "QLNX STATE = %d\n", ha->state);

    if (ha->state == QLNX_STATE_OPEN) {

        qlnx_set_link(ha, false);
        qlnx_clean_filters(ha);
        qlnx_stop_queues(ha);
        ecore_hw_stop_fastpath(cdev);

        for (i = 0; i < ha->num_rss; i++) {
            if (ha->irq_vec[i].handle) {
                (void)bus_teardown_intr(dev,
                    ha->irq_vec[i].irq,
                    ha->irq_vec[i].handle);
                ha->irq_vec[i].handle = NULL;
            }
        }

        qlnx_drain_fp_taskqueues(ha);
        qlnx_drain_soft_lro(ha);
        qlnx_free_mem_load(ha);
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    qlnx_mdelay(__func__, 1000);

    ha->state = QLNX_STATE_CLOSED;

    QL_DPRINT2(ha, "exit\n");
    return;
}

static int
qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
    int rval = -1;
    struct ecore_hwfn *p_hwfn;
    struct ecore_ptt *p_ptt;

    ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

    p_hwfn = &ha->cdev.hwfns[hwfn_index];
    p_ptt = ecore_ptt_acquire(p_hwfn);

    if (!p_ptt) {
        QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
        return (rval);
    }

    rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

    if (rval == DBG_STATUS_OK)
        rval = 0;
    else {
        QL_DPRINT1(ha,
            "ecore_dbg_grc_get_dump_buf_size failed [0x%x]\n", rval);
    }

    ecore_ptt_release(p_hwfn, p_ptt);

    return (rval);
}

static int
qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index)
{
    int rval = -1;
    struct ecore_hwfn *p_hwfn;
    struct ecore_ptt *p_ptt;

    ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver());

    p_hwfn = &ha->cdev.hwfns[hwfn_index];
    p_ptt = ecore_ptt_acquire(p_hwfn);

    if (!p_ptt) {
        QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
        return (rval);
    }

    rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords);

    if (rval == DBG_STATUS_OK)
        rval = 0;
    else {
        QL_DPRINT1(ha,
            "ecore_dbg_idle_chk_get_dump_buf_size failed [0x%x]\n", rval);
    }

    ecore_ptt_release(p_hwfn, p_ptt);

    return (rval);
}
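
/*
 * Illustrative sketch (not part of the driver): a debug path that wanted to
 * capture a GRC dump for hardware function 0 could size and allocate the
 * buffer with the helper above, e.g.
 *
 *    uint32_t dwords = 0;
 *    void *buf = NULL;
 *
 *    if ((qlnx_grc_dumpsize(ha, &dwords, 0) == 0) && dwords)
 *        buf = malloc(dwords * sizeof(uint32_t), M_QLNXBUF, M_NOWAIT);
 *
 * qlnx_idle_chk_size() is used the same way to size idle-check dumps.
 */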

static void
qlnx_sample_storm_stats(qlnx_host_t *ha)
{
    int i, index;
    struct ecore_dev *cdev;
    qlnx_storm_stats_t *s_stats;
    uint32_t reg;
    struct ecore_ptt *p_ptt;
    struct ecore_hwfn *hwfn;

    if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) {
        ha->storm_stats_enable = 0;
        return;
    }

    cdev = &ha->cdev;

    for_each_hwfn(cdev, i) {

        hwfn = &cdev->hwfns[i];

        p_ptt = ecore_ptt_acquire(hwfn);
        if (!p_ptt)
            return;

        index = ha->storm_stats_index +
                (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);

        s_stats = &ha->storm_stats[index];

        /* XSTORM */
        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = XSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* YSTORM */
        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = YSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* PSTORM */
        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = PSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* TSTORM */
        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = TSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* MSTORM */
        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = MSEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        /* USTORM */
        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2;
        s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2;
        s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2;
        s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg);

        reg = USEM_REG_FAST_MEMORY +
                SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2;
        s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg);

        ecore_ptt_release(hwfn, p_ptt);
    }

    ha->storm_stats_index++;

    return;
}
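
/*
 * Storm statistics are sampled once per timer tick while
 * ha->storm_stats_enable is set.  Samples are stored per hardware function
 * in contiguous blocks of QLNX_STORM_STATS_SAMPLES_PER_HWFN entries, so the
 * n-th sample taken for hwfn i lands at
 *
 *    index = n + (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN);
 *
 * Sampling disables itself once storm_stats_index reaches
 * QLNX_STORM_STATS_SAMPLES_PER_HWFN.
 */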

/*
 * Name: qlnx_dump_buf8
 * Function: dumps a buffer as bytes
 */
static void
qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len)
{
    device_t dev;
    uint32_t i = 0;
    uint8_t *buf;

    dev = ha->pci_dev;
    buf = dbuf;

    device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len);

    while (len >= 16) {
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3],
            buf[4], buf[5], buf[6], buf[7],
            buf[8], buf[9], buf[10], buf[11],
            buf[12], buf[13], buf[14], buf[15]);
        i += 16;
        len -= 16;
        buf += 16;
    }
    switch (len) {
    case 1:
        device_printf(dev, "0x%08x: %02x\n", i, buf[0]);
        break;
    case 2:
        device_printf(dev, "0x%08x: %02x %02x\n", i, buf[0], buf[1]);
        break;
    case 3:
        device_printf(dev, "0x%08x: %02x %02x %02x\n",
            i, buf[0], buf[1], buf[2]);
        break;
    case 4:
        device_printf(dev, "0x%08x: %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3]);
        break;
    case 5:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4]);
        break;
    case 6:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]);
        break;
    case 7:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]);
        break;
    case 8:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7]);
        break;
    case 9:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8]);
        break;
    case 10:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8], buf[9]);
        break;
    case 11:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8], buf[9], buf[10]);
        break;
    case 12:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8], buf[9], buf[10], buf[11]);
        break;
    case 13:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]);
        break;
    case 14:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
            buf[13]);
        break;
    case 15:
        device_printf(dev, "0x%08x:"
            " %02x %02x %02x %02x %02x %02x %02x %02x"
            " %02x %02x %02x %02x %02x %02x %02x\n", i,
            buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
            buf[7], buf[8], buf[9], buf[10], buf[11], buf[12],
            buf[13], buf[14]);
        break;
    default:
        break;
    }

    device_printf(dev, "%s: %s dump end\n", __func__, msg);

    return;
}
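
/*
 * Illustrative usage (not part of the driver): dumping the first 32 bytes
 * of a received mbuf while debugging would look like
 *
 *    qlnx_dump_buf8(ha, "rx mbuf", mtod(mp, void *), 32);
 *
 * which prints two lines of 16 hex bytes each, prefixed with the byte
 * offset, between the "dump start" and "dump end" banners.
 */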