/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author: David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"
#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
        struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
        struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
        struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
        char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
        uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
        uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
        struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
        int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
        struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
        int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
        struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe(device_t);
static int qlnx_pci_attach(device_t);
static int qlnx_pci_detach(device_t);

static device_method_t qlnx_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),
    { 0, 0 }
};

static driver_t qlnx_pci_driver = {
    "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe, 1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

MALLOC_DEFINE(M_QLNXBUF,
"qlnxbuf", "Buffers for qlnx driver"); 183 184 185 char qlnx_dev_str[64]; 186 char qlnx_ver_str[VER_SIZE]; 187 char qlnx_name_str[NAME_SIZE]; 188 189 /* 190 * Some PCI Configuration Space Related Defines 191 */ 192 193 #ifndef PCI_VENDOR_QLOGIC 194 #define PCI_VENDOR_QLOGIC 0x1077 195 #endif 196 197 /* 40G Adapter QLE45xxx*/ 198 #ifndef QLOGIC_PCI_DEVICE_ID_1634 199 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 200 #endif 201 202 /* 100G Adapter QLE45xxx*/ 203 #ifndef QLOGIC_PCI_DEVICE_ID_1644 204 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 205 #endif 206 207 /* 25G Adapter QLE45xxx*/ 208 #ifndef QLOGIC_PCI_DEVICE_ID_1656 209 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 210 #endif 211 212 /* 50G Adapter QLE45xxx*/ 213 #ifndef QLOGIC_PCI_DEVICE_ID_1654 214 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 215 #endif 216 217 /* 10G/25G/40G Adapter QLE41xxx*/ 218 #ifndef QLOGIC_PCI_DEVICE_ID_8070 219 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 220 #endif 221 222 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters"); 223 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ 224 static int qlnxe_queue_count = QLNX_DEFAULT_RSS; 225 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 226 &qlnxe_queue_count, 0, "Multi-Queue queue count"); 227 228 static int 229 qlnx_valid_device(device_t dev) 230 { 231 uint16_t device_id; 232 233 device_id = pci_get_device(dev); 234 235 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 236 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 237 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 238 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 239 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 240 return 0; 241 242 return -1; 243 } 244 245 /* 246 * Name: qlnx_pci_probe 247 * Function: Validate the PCI device to be a QLA80XX device 248 */ 249 static int 250 qlnx_pci_probe(device_t dev) 251 { 252 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 253 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 254 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 255 256 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 257 return (ENXIO); 258 } 259 260 switch (pci_get_device(dev)) { 261 262 case QLOGIC_PCI_DEVICE_ID_1644: 263 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 264 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 265 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 266 QLNX_VERSION_BUILD); 267 device_set_desc_copy(dev, qlnx_dev_str); 268 269 break; 270 271 case QLOGIC_PCI_DEVICE_ID_1634: 272 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 273 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 274 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 275 QLNX_VERSION_BUILD); 276 device_set_desc_copy(dev, qlnx_dev_str); 277 278 break; 279 280 case QLOGIC_PCI_DEVICE_ID_1656: 281 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 282 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 283 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 284 QLNX_VERSION_BUILD); 285 device_set_desc_copy(dev, qlnx_dev_str); 286 287 break; 288 289 case QLOGIC_PCI_DEVICE_ID_1654: 290 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 291 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 292 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 293 QLNX_VERSION_BUILD); 294 device_set_desc_copy(dev, qlnx_dev_str); 295 296 break; 297 298 case QLOGIC_PCI_DEVICE_ID_8070: 299 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 300 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH) " 301 "Adapter-Ethernet Function", 302 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 303 QLNX_VERSION_BUILD); 
304 device_set_desc_copy(dev, qlnx_dev_str); 305 306 break; 307 308 default: 309 return (ENXIO); 310 } 311 312 return (BUS_PROBE_DEFAULT); 313 } 314 315 static uint16_t 316 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 317 struct qlnx_tx_queue *txq) 318 { 319 u16 hw_bd_cons; 320 u16 ecore_cons_idx; 321 uint16_t diff; 322 323 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 324 325 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 326 if (hw_bd_cons < ecore_cons_idx) { 327 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 328 } else { 329 diff = hw_bd_cons - ecore_cons_idx; 330 } 331 return diff; 332 } 333 334 335 static void 336 qlnx_sp_intr(void *arg) 337 { 338 struct ecore_hwfn *p_hwfn; 339 qlnx_host_t *ha; 340 int i; 341 342 p_hwfn = arg; 343 344 if (p_hwfn == NULL) { 345 printf("%s: spurious slowpath intr\n", __func__); 346 return; 347 } 348 349 ha = (qlnx_host_t *)p_hwfn->p_dev; 350 351 QL_DPRINT2(ha, "enter\n"); 352 353 for (i = 0; i < ha->cdev.num_hwfns; i++) { 354 if (&ha->cdev.hwfns[i] == p_hwfn) { 355 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 356 break; 357 } 358 } 359 QL_DPRINT2(ha, "exit\n"); 360 361 return; 362 } 363 364 static void 365 qlnx_sp_taskqueue(void *context, int pending) 366 { 367 struct ecore_hwfn *p_hwfn; 368 369 p_hwfn = context; 370 371 if (p_hwfn != NULL) { 372 qlnx_sp_isr(p_hwfn); 373 } 374 return; 375 } 376 377 static int 378 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 379 { 380 int i; 381 uint8_t tq_name[32]; 382 383 for (i = 0; i < ha->cdev.num_hwfns; i++) { 384 385 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 386 387 bzero(tq_name, sizeof (tq_name)); 388 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 389 390 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 391 392 ha->sp_taskqueue[i] = taskqueue_create_fast(tq_name, M_NOWAIT, 393 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 394 395 if (ha->sp_taskqueue[i] == NULL) 396 return (-1); 397 398 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 399 tq_name); 400 401 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 402 } 403 404 return (0); 405 } 406 407 static void 408 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 409 { 410 int i; 411 412 for (i = 0; i < ha->cdev.num_hwfns; i++) { 413 if (ha->sp_taskqueue[i] != NULL) { 414 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 415 taskqueue_free(ha->sp_taskqueue[i]); 416 } 417 } 418 return; 419 } 420 421 static void 422 qlnx_fp_taskqueue(void *context, int pending) 423 { 424 struct qlnx_fastpath *fp; 425 qlnx_host_t *ha; 426 struct ifnet *ifp; 427 428 #ifdef QLNX_RCV_IN_TASKQ 429 int lro_enable; 430 int rx_int = 0, total_rx_count = 0; 431 struct thread *cthread; 432 #endif /* #ifdef QLNX_RCV_IN_TASKQ */ 433 434 fp = context; 435 436 if (fp == NULL) 437 return; 438 439 ha = (qlnx_host_t *)fp->edev; 440 441 ifp = ha->ifp; 442 443 #ifdef QLNX_RCV_IN_TASKQ 444 445 cthread = curthread; 446 447 thread_lock(cthread); 448 449 if (!sched_is_bound(cthread)) 450 sched_bind(cthread, fp->rss_id); 451 452 thread_unlock(cthread); 453 454 lro_enable = ifp->if_capenable & IFCAP_LRO; 455 456 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, lro_enable); 457 458 if (rx_int) { 459 fp->rx_pkts += rx_int; 460 total_rx_count += rx_int; 461 } 462 463 #ifdef QLNX_SOFT_LRO 464 { 465 struct lro_ctrl *lro; 466 467 lro = &fp->rxq->lro; 468 469 if (lro_enable && total_rx_count) { 470 471 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 472 473 if (ha->dbg_trace_lro_cnt) { 474 if (lro->lro_mbuf_count & ~1023) 475 
                    fp->lro_cnt_1024++;
                else if (lro->lro_mbuf_count & ~511)
                    fp->lro_cnt_512++;
                else if (lro->lro_mbuf_count & ~255)
                    fp->lro_cnt_256++;
                else if (lro->lro_mbuf_count & ~127)
                    fp->lro_cnt_128++;
                else if (lro->lro_mbuf_count & ~63)
                    fp->lro_cnt_64++;
            }
            tcp_lro_flush_all(lro);

#else
            struct lro_entry *queued;

            while ((!SLIST_EMPTY(&lro->lro_active))) {
                queued = SLIST_FIRST(&lro->lro_active);
                SLIST_REMOVE_HEAD(&lro->lro_active, next);
                tcp_lro_flush(lro, queued);
            }
#endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
        }
    }
#endif /* #ifdef QLNX_SOFT_LRO */

    ecore_sb_update_sb_idx(fp->sb_info);
    rmb();

#endif /* #ifdef QLNX_RCV_IN_TASKQ */

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

        if (!drbr_empty(ifp, fp->tx_br)) {

            if (mtx_trylock(&fp->tx_mtx)) {

#ifdef QLNX_TRACE_PERF_DATA
                tx_pkts = fp->tx_pkts_transmitted;
                tx_compl = fp->tx_pkts_completed;
#endif

                qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
                fp->tx_pkts_trans_fp +=
                    (fp->tx_pkts_transmitted - tx_pkts);
                fp->tx_pkts_compl_fp +=
                    (fp->tx_pkts_completed - tx_compl);
#endif
                mtx_unlock(&fp->tx_mtx);
            }
        }
    }

#ifdef QLNX_RCV_IN_TASKQ
    if (rx_int) {
        if (fp->fp_taskqueue != NULL)
            taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
    } else {
        if (fp->tx_ring_full) {
            qlnx_mdelay(__func__, 100);
        }
        ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
    }
#endif /* #ifdef QLNX_RCV_IN_TASKQ */

    QL_DPRINT2(ha, "exit \n");
    return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

        TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create_fast(tq_name, M_NOWAIT,
                    taskqueue_thread_enqueue,
                    &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
    }

    return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {

        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {

            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }
    return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            QLNX_UNLOCK(ha);
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            QLNX_LOCK(ha);
        }
    }
    return;
}

static void
qlnx_get_params(qlnx_host_t *ha)
{
    if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
        device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
            qlnxe_queue_count);
        qlnxe_queue_count = 0;
    }
    return;
}

/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
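    /*
     * Attach flow (summary of the code below): map BAR0 (registers),
     * BAR(2) (doorbells) and BAR(4) (MSI-X), allocate the parent/tx/rx
     * DMA tags, run ecore_hw_prepare() via qlnx_init_hw(), size and
     * allocate the MSI-X vectors (one per hwfn for the slow path plus
     * one per RSS queue), hook up the slow path and fast path
     * interrupts and their taskqueues, allocate the grcdump/idle-check
     * buffers, start the slow path, create the ifnet and register the
     * sysctls.
     */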
    qlnx_host_t *ha = NULL;
    uint32_t rsrc_len_reg = 0;
    uint32_t rsrc_len_dbells = 0;
    uint32_t rsrc_len_msix = 0;
    int i;
    uint32_t mfw_ver;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qlnx_host_t));

    if (qlnx_valid_device(dev) != 0) {
        device_printf(dev, "device is not a supported qlnxe device\n");
        return (ENXIO);
    }
    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

    ha->flags.lock_init = 1;

    pci_enable_busmaster(dev);

    /*
     * map the PCI BARs
     */

    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
                RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map BAR0\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                ha->reg_rid);

    ha->dbells_rid = PCIR_BAR(2);
    ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                &ha->dbells_rid, RF_ACTIVE);

    if (ha->pci_dbells == NULL) {
        device_printf(dev, "unable to map BAR1\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                ha->dbells_rid);

    ha->dbells_phys_addr = (uint64_t)
        bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);
    ha->dbells_size = rsrc_len_dbells;

    ha->msix_rid = PCIR_BAR(4);
    ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                &ha->msix_rid, RF_ACTIVE);

    if (ha->msix_bar == NULL) {
        device_printf(dev, "unable to map BAR2\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
                ha->msix_rid);

    /*
     * allocate dma tags
     */

    if (qlnx_alloc_parent_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_tx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_rx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_init_hw(ha) != 0)
        goto qlnx_pci_attach_err;

    qlnx_get_params(ha);

    if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
        (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
        qlnxe_queue_count = QLNX_MAX_RSS;
    }

    /*
     * Allocate MSI-X vectors
     */
    if (qlnxe_queue_count == 0)
        ha->num_rss = QLNX_DEFAULT_RSS;
    else
        ha->num_rss = qlnxe_queue_count;

    ha->num_tc = QLNX_MAX_TC;

    ha->msix_count = pci_msix_count(dev);

    if (ha->msix_count > (mp_ncpus + ha->cdev.num_hwfns))
        ha->msix_count = mp_ncpus + ha->cdev.num_hwfns;

    if (!ha->msix_count ||
        (ha->msix_count < (ha->cdev.num_hwfns + 1))) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    if (ha->msix_count > (ha->num_rss + ha->cdev.num_hwfns))
        ha->msix_count = ha->num_rss + ha->cdev.num_hwfns;
    else
        ha->num_rss = ha->msix_count - ha->cdev.num_hwfns;

    QL_DPRINT1(ha, "\n\t\t\tpci_reg [%p, 0x%08x 0x%08x]"
        "\n\t\t\tdbells [%p, 0x%08x 0x%08x]"
        "\n\t\t\tmsix [%p, 0x%08x 0x%08x 0x%x 0x%x]"
        "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
        ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        ha->msix_count,
        mp_ncpus, ha->num_rss, ha->num_tc);

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        ha->msix_count = 0;
        goto qlnx_pci_attach_err;
    }

    /*
     * Initialize slow path interrupt and task queue
     */
    if (qlnx_create_sp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        ha->sp_irq_rid[i] = i + 1;
        ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &ha->sp_irq_rid[i],
                (RF_ACTIVE | RF_SHAREABLE));
        if (ha->sp_irq[i] == NULL) {
            device_printf(dev,
                "could not allocate slow path interrupt\n");
            goto qlnx_pci_attach_err;
        }

        if (bus_setup_intr(dev, ha->sp_irq[i],
                (INTR_TYPE_NET | INTR_MPSAFE), NULL,
                qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
            device_printf(dev,
                "could not setup slow path interrupt\n");
            goto qlnx_pci_attach_err;
        }

        QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
            " sp_irq %p sp_handle %p\n", p_hwfn,
            ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
    }

    /*
     * initialize fast path interrupt
     */
    if (qlnx_create_fp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->num_rss; i++) {
        ha->irq_vec[i].rss_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = (1 + ha->cdev.num_hwfns) + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &ha->irq_vec[i].irq_rid,
                (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev,
                "could not allocate interrupt[%d]\n", i);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
            device_printf(dev, "could not allocate tx_br[%d]\n", i);
            goto qlnx_pci_attach_err;
        }
    }

    callout_init(&ha->qlnx_callout, 1);
    ha->flags.callout_init = 1;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {

        if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
            goto qlnx_pci_attach_err;
        if (ha->grcdump_size[i] == 0)
            goto qlnx_pci_attach_err;

        ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
        QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
            i, ha->grcdump_size[i]);

        ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
        if (ha->grcdump[i] == NULL) {
            device_printf(dev, "grcdump alloc[%d] failed\n", i);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
            goto qlnx_pci_attach_err;
        if (ha->idle_chk_size[i] == 0)
            goto qlnx_pci_attach_err;

        ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
        QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
            i, ha->idle_chk_size[i]);

        ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

        if (ha->idle_chk[i] == NULL) {
            device_printf(dev, "idle_chk alloc failed\n");
            goto qlnx_pci_attach_err;
        }
    }

    if (qlnx_slowpath_start(ha) != 0) {

        qlnx_mdelay(__func__, 1000);
        qlnx_trigger_dump(ha);

        goto qlnx_pci_attach_err0;
    } else
        ha->flags.slowpath_start = 1;

    if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
        qlnx_mdelay(__func__, 1000);
        qlnx_trigger_dump(ha);

        goto qlnx_pci_attach_err0;
    }

    if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
        qlnx_mdelay(__func__, 1000);
        qlnx_trigger_dump(ha);

        goto qlnx_pci_attach_err0;
    }
    snprintf(ha->mfw_ver,
        sizeof(ha->mfw_ver), "%d.%d.%d.%d",
        ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
        ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
    snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
        FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
        FW_ENGINEERING_VERSION);

    QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
        ha->stormfw_ver, ha->mfw_ver);

    qlnx_init_ifnet(dev, ha);

    /*
     * add sysctls
     */
    qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
    /*
     * create ioctl device interface
     */
    if (qlnx_make_cdev(ha)) {
        device_printf(dev, "%s: qlnx_make_cdev failed\n", __func__);
        goto qlnx_pci_attach_err;
    }

    QL_DPRINT2(ha, "success\n");

    return (0);

qlnx_pci_attach_err:

    qlnx_release(ha);

    return (ENXIO);
}

/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
    qlnx_host_t *ha = NULL;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    QLNX_LOCK(ha);
    qlnx_stop(ha);
    QLNX_UNLOCK(ha);

    qlnx_release(ha);

    return (0);
}

static int
qlnx_init_hw(qlnx_host_t *ha)
{
    int rval = 0;
    struct ecore_hw_prepare_params params;

    ecore_init_struct(&ha->cdev);

    /* ha->dp_module = ECORE_MSG_PROBE |
            ECORE_MSG_INTR |
            ECORE_MSG_SP |
            ECORE_MSG_LINK |
            ECORE_MSG_SPQ |
            ECORE_MSG_RDMA;
    ha->dp_level = ECORE_LEVEL_VERBOSE;*/
    ha->dp_level = ECORE_LEVEL_NOTICE;

    ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

    ha->cdev.regview = ha->pci_reg;
    ha->cdev.doorbells = ha->pci_dbells;
    ha->cdev.db_phys_addr = ha->dbells_phys_addr;
    ha->cdev.db_size = ha->dbells_size;

    bzero(&params, sizeof (struct ecore_hw_prepare_params));

    ha->personality = ECORE_PCI_DEFAULT;

    params.personality = ha->personality;

    params.drv_resc_alloc = false;
    params.chk_reg_fifo = false;
    params.initiate_pf_flr = true;
    params.epoch = 0;

    ecore_hw_prepare(&ha->cdev, &params);

    qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

    return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
        if (ha->idle_chk[i] != NULL) {
            free(ha->idle_chk[i], M_QLNXBUF);
            ha->idle_chk[i] = NULL;
        }

        if (ha->grcdump[i] != NULL) {
            free(ha->grcdump[i], M_QLNXBUF);
            ha->grcdump[i] = NULL;
        }
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    if (ha->flags.slowpath_start) {
        qlnx_slowpath_stop(ha);
    }

    ecore_hw_remove(&ha->cdev);

    qlnx_del_cdev(ha);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qlnx_free_tx_dma_tag(ha);

    qlnx_free_rx_dma_tag(ha);

    qlnx_free_parent_dma_tag(ha);

    for (i = 0; i < ha->num_rss; i++) {
        struct qlnx_fastpath *fp = &ha->fp_array[i];

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qlnx_free_tx_br(ha,
fp); 1050 } 1051 qlnx_destroy_fp_taskqueues(ha); 1052 1053 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1054 if (ha->sp_handle[i]) 1055 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1056 ha->sp_handle[i]); 1057 1058 if (ha->sp_irq[i]) 1059 (void) bus_release_resource(dev, SYS_RES_IRQ, 1060 ha->sp_irq_rid[i], ha->sp_irq[i]); 1061 } 1062 1063 qlnx_destroy_sp_taskqueues(ha); 1064 1065 if (ha->msix_count) 1066 pci_release_msi(dev); 1067 1068 if (ha->flags.lock_init) { 1069 mtx_destroy(&ha->hw_lock); 1070 } 1071 1072 if (ha->pci_reg) 1073 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1074 ha->pci_reg); 1075 1076 if (ha->pci_dbells) 1077 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1078 ha->pci_dbells); 1079 1080 if (ha->msix_bar) 1081 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1082 ha->msix_bar); 1083 1084 QL_DPRINT2(ha, "exit\n"); 1085 return; 1086 } 1087 1088 static void 1089 qlnx_trigger_dump(qlnx_host_t *ha) 1090 { 1091 int i; 1092 1093 if (ha->ifp != NULL) 1094 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1095 1096 QL_DPRINT2(ha, "enter\n"); 1097 1098 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1099 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1100 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1101 } 1102 1103 QL_DPRINT2(ha, "exit\n"); 1104 1105 return; 1106 } 1107 1108 static int 1109 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1110 { 1111 int err, ret = 0; 1112 qlnx_host_t *ha; 1113 1114 err = sysctl_handle_int(oidp, &ret, 0, req); 1115 1116 if (err || !req->newptr) 1117 return (err); 1118 1119 if (ret == 1) { 1120 ha = (qlnx_host_t *)arg1; 1121 qlnx_trigger_dump(ha); 1122 } 1123 return (err); 1124 } 1125 1126 static int 1127 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1128 { 1129 int err, i, ret = 0, usecs = 0; 1130 qlnx_host_t *ha; 1131 struct ecore_hwfn *p_hwfn; 1132 struct qlnx_fastpath *fp; 1133 1134 err = sysctl_handle_int(oidp, &usecs, 0, req); 1135 1136 if (err || !req->newptr || !usecs || (usecs > 255)) 1137 return (err); 1138 1139 ha = (qlnx_host_t *)arg1; 1140 1141 for (i = 0; i < ha->num_rss; i++) { 1142 1143 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1144 1145 fp = &ha->fp_array[i]; 1146 1147 if (fp->txq[0]->handle != NULL) { 1148 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1149 (uint16_t)usecs, fp->txq[0]->handle); 1150 } 1151 } 1152 1153 if (!ret) 1154 ha->tx_coalesce_usecs = (uint8_t)usecs; 1155 1156 return (err); 1157 } 1158 1159 static int 1160 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1161 { 1162 int err, i, ret = 0, usecs = 0; 1163 qlnx_host_t *ha; 1164 struct ecore_hwfn *p_hwfn; 1165 struct qlnx_fastpath *fp; 1166 1167 err = sysctl_handle_int(oidp, &usecs, 0, req); 1168 1169 if (err || !req->newptr || !usecs || (usecs > 255)) 1170 return (err); 1171 1172 ha = (qlnx_host_t *)arg1; 1173 1174 for (i = 0; i < ha->num_rss; i++) { 1175 1176 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1177 1178 fp = &ha->fp_array[i]; 1179 1180 if (fp->rxq->handle != NULL) { 1181 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1182 0, fp->rxq->handle); 1183 } 1184 } 1185 1186 if (!ret) 1187 ha->rx_coalesce_usecs = (uint8_t)usecs; 1188 1189 return (err); 1190 } 1191 1192 static void 1193 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1194 { 1195 struct sysctl_ctx_list *ctx; 1196 struct sysctl_oid_list *children; 1197 struct sysctl_oid *ctx_oid; 1198 1199 ctx = device_get_sysctl_ctx(ha->pci_dev); 1200 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1201 1202 ctx_oid = SYSCTL_ADD_NODE(ctx, 
children, OID_AUTO, "spstat", 1203 CTLFLAG_RD, NULL, "spstat"); 1204 children = SYSCTL_CHILDREN(ctx_oid); 1205 1206 SYSCTL_ADD_QUAD(ctx, children, 1207 OID_AUTO, "sp_interrupts", 1208 CTLFLAG_RD, &ha->sp_interrupts, 1209 "No. of slowpath interrupts"); 1210 1211 return; 1212 } 1213 1214 static void 1215 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1216 { 1217 struct sysctl_ctx_list *ctx; 1218 struct sysctl_oid_list *children; 1219 struct sysctl_oid_list *node_children; 1220 struct sysctl_oid *ctx_oid; 1221 int i, j; 1222 uint8_t name_str[16]; 1223 1224 ctx = device_get_sysctl_ctx(ha->pci_dev); 1225 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1226 1227 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1228 CTLFLAG_RD, NULL, "fpstat"); 1229 children = SYSCTL_CHILDREN(ctx_oid); 1230 1231 for (i = 0; i < ha->num_rss; i++) { 1232 1233 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1234 snprintf(name_str, sizeof(name_str), "%d", i); 1235 1236 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1237 CTLFLAG_RD, NULL, name_str); 1238 node_children = SYSCTL_CHILDREN(ctx_oid); 1239 1240 /* Tx Related */ 1241 1242 SYSCTL_ADD_QUAD(ctx, node_children, 1243 OID_AUTO, "tx_pkts_processed", 1244 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1245 "No. of packets processed for transmission"); 1246 1247 SYSCTL_ADD_QUAD(ctx, node_children, 1248 OID_AUTO, "tx_pkts_freed", 1249 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1250 "No. of freed packets"); 1251 1252 SYSCTL_ADD_QUAD(ctx, node_children, 1253 OID_AUTO, "tx_pkts_transmitted", 1254 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1255 "No. of transmitted packets"); 1256 1257 SYSCTL_ADD_QUAD(ctx, node_children, 1258 OID_AUTO, "tx_pkts_completed", 1259 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1260 "No. of transmit completions"); 1261 1262 SYSCTL_ADD_QUAD(ctx, node_children, 1263 OID_AUTO, "tx_non_tso_pkts", 1264 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1265 "No. of non LSO transmited packets"); 1266 1267 #ifdef QLNX_TRACE_PERF_DATA 1268 1269 SYSCTL_ADD_QUAD(ctx, node_children, 1270 OID_AUTO, "tx_pkts_trans_ctx", 1271 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1272 "No. of transmitted packets in transmit context"); 1273 1274 SYSCTL_ADD_QUAD(ctx, node_children, 1275 OID_AUTO, "tx_pkts_compl_ctx", 1276 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1277 "No. of transmit completions in transmit context"); 1278 1279 SYSCTL_ADD_QUAD(ctx, node_children, 1280 OID_AUTO, "tx_pkts_trans_fp", 1281 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1282 "No. of transmitted packets in taskqueue"); 1283 1284 SYSCTL_ADD_QUAD(ctx, node_children, 1285 OID_AUTO, "tx_pkts_compl_fp", 1286 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1287 "No. of transmit completions in taskqueue"); 1288 1289 SYSCTL_ADD_QUAD(ctx, node_children, 1290 OID_AUTO, "tx_pkts_compl_intr", 1291 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1292 "No. of transmit completions in interrupt ctx"); 1293 #endif 1294 1295 SYSCTL_ADD_QUAD(ctx, node_children, 1296 OID_AUTO, "tx_tso_pkts", 1297 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1298 "No. 
of LSO transmited packets"); 1299 1300 SYSCTL_ADD_QUAD(ctx, node_children, 1301 OID_AUTO, "tx_lso_wnd_min_len", 1302 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1303 "tx_lso_wnd_min_len"); 1304 1305 SYSCTL_ADD_QUAD(ctx, node_children, 1306 OID_AUTO, "tx_defrag", 1307 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1308 "tx_defrag"); 1309 1310 SYSCTL_ADD_QUAD(ctx, node_children, 1311 OID_AUTO, "tx_nsegs_gt_elem_left", 1312 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1313 "tx_nsegs_gt_elem_left"); 1314 1315 SYSCTL_ADD_UINT(ctx, node_children, 1316 OID_AUTO, "tx_tso_max_nsegs", 1317 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1318 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1319 1320 SYSCTL_ADD_UINT(ctx, node_children, 1321 OID_AUTO, "tx_tso_min_nsegs", 1322 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1323 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1324 1325 SYSCTL_ADD_UINT(ctx, node_children, 1326 OID_AUTO, "tx_tso_max_pkt_len", 1327 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1328 ha->fp_array[i].tx_tso_max_pkt_len, 1329 "tx_tso_max_pkt_len"); 1330 1331 SYSCTL_ADD_UINT(ctx, node_children, 1332 OID_AUTO, "tx_tso_min_pkt_len", 1333 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1334 ha->fp_array[i].tx_tso_min_pkt_len, 1335 "tx_tso_min_pkt_len"); 1336 1337 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1338 1339 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1340 snprintf(name_str, sizeof(name_str), 1341 "tx_pkts_nseg_%02d", (j+1)); 1342 1343 SYSCTL_ADD_QUAD(ctx, node_children, 1344 OID_AUTO, name_str, CTLFLAG_RD, 1345 &ha->fp_array[i].tx_pkts[j], name_str); 1346 } 1347 1348 #ifdef QLNX_TRACE_PERF_DATA 1349 for (j = 0; j < 18; j++) { 1350 1351 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1352 snprintf(name_str, sizeof(name_str), 1353 "tx_pkts_hist_%02d", (j+1)); 1354 1355 SYSCTL_ADD_QUAD(ctx, node_children, 1356 OID_AUTO, name_str, CTLFLAG_RD, 1357 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1358 } 1359 for (j = 0; j < 5; j++) { 1360 1361 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1362 snprintf(name_str, sizeof(name_str), 1363 "tx_comInt_%02d", (j+1)); 1364 1365 SYSCTL_ADD_QUAD(ctx, node_children, 1366 OID_AUTO, name_str, CTLFLAG_RD, 1367 &ha->fp_array[i].tx_comInt[j], name_str); 1368 } 1369 for (j = 0; j < 18; j++) { 1370 1371 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1372 snprintf(name_str, sizeof(name_str), 1373 "tx_pkts_q_%02d", (j+1)); 1374 1375 SYSCTL_ADD_QUAD(ctx, node_children, 1376 OID_AUTO, name_str, CTLFLAG_RD, 1377 &ha->fp_array[i].tx_pkts_q[j], name_str); 1378 } 1379 #endif 1380 1381 SYSCTL_ADD_QUAD(ctx, node_children, 1382 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1383 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1384 "err_tx_nsegs_gt_elem_left"); 1385 1386 SYSCTL_ADD_QUAD(ctx, node_children, 1387 OID_AUTO, "err_tx_dmamap_create", 1388 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1389 "err_tx_dmamap_create"); 1390 1391 SYSCTL_ADD_QUAD(ctx, node_children, 1392 OID_AUTO, "err_tx_defrag_dmamap_load", 1393 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1394 "err_tx_defrag_dmamap_load"); 1395 1396 SYSCTL_ADD_QUAD(ctx, node_children, 1397 OID_AUTO, "err_tx_non_tso_max_seg", 1398 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1399 "err_tx_non_tso_max_seg"); 1400 1401 SYSCTL_ADD_QUAD(ctx, node_children, 1402 OID_AUTO, "err_tx_dmamap_load", 1403 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1404 "err_tx_dmamap_load"); 1405 1406 SYSCTL_ADD_QUAD(ctx, node_children, 1407 OID_AUTO, 
"err_tx_defrag", 1408 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1409 "err_tx_defrag"); 1410 1411 SYSCTL_ADD_QUAD(ctx, node_children, 1412 OID_AUTO, "err_tx_free_pkt_null", 1413 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1414 "err_tx_free_pkt_null"); 1415 1416 SYSCTL_ADD_QUAD(ctx, node_children, 1417 OID_AUTO, "err_tx_cons_idx_conflict", 1418 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1419 "err_tx_cons_idx_conflict"); 1420 1421 SYSCTL_ADD_QUAD(ctx, node_children, 1422 OID_AUTO, "lro_cnt_64", 1423 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1424 "lro_cnt_64"); 1425 1426 SYSCTL_ADD_QUAD(ctx, node_children, 1427 OID_AUTO, "lro_cnt_128", 1428 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1429 "lro_cnt_128"); 1430 1431 SYSCTL_ADD_QUAD(ctx, node_children, 1432 OID_AUTO, "lro_cnt_256", 1433 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1434 "lro_cnt_256"); 1435 1436 SYSCTL_ADD_QUAD(ctx, node_children, 1437 OID_AUTO, "lro_cnt_512", 1438 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1439 "lro_cnt_512"); 1440 1441 SYSCTL_ADD_QUAD(ctx, node_children, 1442 OID_AUTO, "lro_cnt_1024", 1443 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1444 "lro_cnt_1024"); 1445 1446 /* Rx Related */ 1447 1448 SYSCTL_ADD_QUAD(ctx, node_children, 1449 OID_AUTO, "rx_pkts", 1450 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1451 "No. of received packets"); 1452 1453 SYSCTL_ADD_QUAD(ctx, node_children, 1454 OID_AUTO, "tpa_start", 1455 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1456 "No. of tpa_start packets"); 1457 1458 SYSCTL_ADD_QUAD(ctx, node_children, 1459 OID_AUTO, "tpa_cont", 1460 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1461 "No. of tpa_cont packets"); 1462 1463 SYSCTL_ADD_QUAD(ctx, node_children, 1464 OID_AUTO, "tpa_end", 1465 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1466 "No. of tpa_end packets"); 1467 1468 SYSCTL_ADD_QUAD(ctx, node_children, 1469 OID_AUTO, "err_m_getcl", 1470 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1471 "err_m_getcl"); 1472 1473 SYSCTL_ADD_QUAD(ctx, node_children, 1474 OID_AUTO, "err_m_getjcl", 1475 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1476 "err_m_getjcl"); 1477 1478 SYSCTL_ADD_QUAD(ctx, node_children, 1479 OID_AUTO, "err_rx_hw_errors", 1480 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1481 "err_rx_hw_errors"); 1482 1483 SYSCTL_ADD_QUAD(ctx, node_children, 1484 OID_AUTO, "err_rx_alloc_errors", 1485 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1486 "err_rx_alloc_errors"); 1487 } 1488 1489 return; 1490 } 1491 1492 static void 1493 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1494 { 1495 struct sysctl_ctx_list *ctx; 1496 struct sysctl_oid_list *children; 1497 struct sysctl_oid *ctx_oid; 1498 1499 ctx = device_get_sysctl_ctx(ha->pci_dev); 1500 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1501 1502 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1503 CTLFLAG_RD, NULL, "hwstat"); 1504 children = SYSCTL_CHILDREN(ctx_oid); 1505 1506 SYSCTL_ADD_QUAD(ctx, children, 1507 OID_AUTO, "no_buff_discards", 1508 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1509 "No. of packets discarded due to lack of buffer"); 1510 1511 SYSCTL_ADD_QUAD(ctx, children, 1512 OID_AUTO, "packet_too_big_discard", 1513 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1514 "No. 
of packets discarded because packet was too big"); 1515 1516 SYSCTL_ADD_QUAD(ctx, children, 1517 OID_AUTO, "ttl0_discard", 1518 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1519 "ttl0_discard"); 1520 1521 SYSCTL_ADD_QUAD(ctx, children, 1522 OID_AUTO, "rx_ucast_bytes", 1523 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1524 "rx_ucast_bytes"); 1525 1526 SYSCTL_ADD_QUAD(ctx, children, 1527 OID_AUTO, "rx_mcast_bytes", 1528 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1529 "rx_mcast_bytes"); 1530 1531 SYSCTL_ADD_QUAD(ctx, children, 1532 OID_AUTO, "rx_bcast_bytes", 1533 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1534 "rx_bcast_bytes"); 1535 1536 SYSCTL_ADD_QUAD(ctx, children, 1537 OID_AUTO, "rx_ucast_pkts", 1538 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1539 "rx_ucast_pkts"); 1540 1541 SYSCTL_ADD_QUAD(ctx, children, 1542 OID_AUTO, "rx_mcast_pkts", 1543 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1544 "rx_mcast_pkts"); 1545 1546 SYSCTL_ADD_QUAD(ctx, children, 1547 OID_AUTO, "rx_bcast_pkts", 1548 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1549 "rx_bcast_pkts"); 1550 1551 SYSCTL_ADD_QUAD(ctx, children, 1552 OID_AUTO, "mftag_filter_discards", 1553 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1554 "mftag_filter_discards"); 1555 1556 SYSCTL_ADD_QUAD(ctx, children, 1557 OID_AUTO, "mac_filter_discards", 1558 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1559 "mac_filter_discards"); 1560 1561 SYSCTL_ADD_QUAD(ctx, children, 1562 OID_AUTO, "tx_ucast_bytes", 1563 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1564 "tx_ucast_bytes"); 1565 1566 SYSCTL_ADD_QUAD(ctx, children, 1567 OID_AUTO, "tx_mcast_bytes", 1568 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1569 "tx_mcast_bytes"); 1570 1571 SYSCTL_ADD_QUAD(ctx, children, 1572 OID_AUTO, "tx_bcast_bytes", 1573 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1574 "tx_bcast_bytes"); 1575 1576 SYSCTL_ADD_QUAD(ctx, children, 1577 OID_AUTO, "tx_ucast_pkts", 1578 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1579 "tx_ucast_pkts"); 1580 1581 SYSCTL_ADD_QUAD(ctx, children, 1582 OID_AUTO, "tx_mcast_pkts", 1583 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1584 "tx_mcast_pkts"); 1585 1586 SYSCTL_ADD_QUAD(ctx, children, 1587 OID_AUTO, "tx_bcast_pkts", 1588 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1589 "tx_bcast_pkts"); 1590 1591 SYSCTL_ADD_QUAD(ctx, children, 1592 OID_AUTO, "tx_err_drop_pkts", 1593 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1594 "tx_err_drop_pkts"); 1595 1596 SYSCTL_ADD_QUAD(ctx, children, 1597 OID_AUTO, "tpa_coalesced_pkts", 1598 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1599 "tpa_coalesced_pkts"); 1600 1601 SYSCTL_ADD_QUAD(ctx, children, 1602 OID_AUTO, "tpa_coalesced_events", 1603 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1604 "tpa_coalesced_events"); 1605 1606 SYSCTL_ADD_QUAD(ctx, children, 1607 OID_AUTO, "tpa_aborts_num", 1608 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1609 "tpa_aborts_num"); 1610 1611 SYSCTL_ADD_QUAD(ctx, children, 1612 OID_AUTO, "tpa_not_coalesced_pkts", 1613 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1614 "tpa_not_coalesced_pkts"); 1615 1616 SYSCTL_ADD_QUAD(ctx, children, 1617 OID_AUTO, "tpa_coalesced_bytes", 1618 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1619 "tpa_coalesced_bytes"); 1620 1621 SYSCTL_ADD_QUAD(ctx, children, 1622 OID_AUTO, "rx_64_byte_packets", 1623 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1624 "rx_64_byte_packets"); 1625 1626 SYSCTL_ADD_QUAD(ctx, children, 1627 OID_AUTO, 
"rx_65_to_127_byte_packets", 1628 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1629 "rx_65_to_127_byte_packets"); 1630 1631 SYSCTL_ADD_QUAD(ctx, children, 1632 OID_AUTO, "rx_128_to_255_byte_packets", 1633 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1634 "rx_128_to_255_byte_packets"); 1635 1636 SYSCTL_ADD_QUAD(ctx, children, 1637 OID_AUTO, "rx_256_to_511_byte_packets", 1638 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1639 "rx_256_to_511_byte_packets"); 1640 1641 SYSCTL_ADD_QUAD(ctx, children, 1642 OID_AUTO, "rx_512_to_1023_byte_packets", 1643 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1644 "rx_512_to_1023_byte_packets"); 1645 1646 SYSCTL_ADD_QUAD(ctx, children, 1647 OID_AUTO, "rx_1024_to_1518_byte_packets", 1648 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1649 "rx_1024_to_1518_byte_packets"); 1650 1651 SYSCTL_ADD_QUAD(ctx, children, 1652 OID_AUTO, "rx_1519_to_1522_byte_packets", 1653 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1654 "rx_1519_to_1522_byte_packets"); 1655 1656 SYSCTL_ADD_QUAD(ctx, children, 1657 OID_AUTO, "rx_1523_to_2047_byte_packets", 1658 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1659 "rx_1523_to_2047_byte_packets"); 1660 1661 SYSCTL_ADD_QUAD(ctx, children, 1662 OID_AUTO, "rx_2048_to_4095_byte_packets", 1663 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1664 "rx_2048_to_4095_byte_packets"); 1665 1666 SYSCTL_ADD_QUAD(ctx, children, 1667 OID_AUTO, "rx_4096_to_9216_byte_packets", 1668 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1669 "rx_4096_to_9216_byte_packets"); 1670 1671 SYSCTL_ADD_QUAD(ctx, children, 1672 OID_AUTO, "rx_9217_to_16383_byte_packets", 1673 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1674 "rx_9217_to_16383_byte_packets"); 1675 1676 SYSCTL_ADD_QUAD(ctx, children, 1677 OID_AUTO, "rx_crc_errors", 1678 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1679 "rx_crc_errors"); 1680 1681 SYSCTL_ADD_QUAD(ctx, children, 1682 OID_AUTO, "rx_mac_crtl_frames", 1683 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1684 "rx_mac_crtl_frames"); 1685 1686 SYSCTL_ADD_QUAD(ctx, children, 1687 OID_AUTO, "rx_pause_frames", 1688 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1689 "rx_pause_frames"); 1690 1691 SYSCTL_ADD_QUAD(ctx, children, 1692 OID_AUTO, "rx_pfc_frames", 1693 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1694 "rx_pfc_frames"); 1695 1696 SYSCTL_ADD_QUAD(ctx, children, 1697 OID_AUTO, "rx_align_errors", 1698 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1699 "rx_align_errors"); 1700 1701 SYSCTL_ADD_QUAD(ctx, children, 1702 OID_AUTO, "rx_carrier_errors", 1703 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1704 "rx_carrier_errors"); 1705 1706 SYSCTL_ADD_QUAD(ctx, children, 1707 OID_AUTO, "rx_oversize_packets", 1708 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 1709 "rx_oversize_packets"); 1710 1711 SYSCTL_ADD_QUAD(ctx, children, 1712 OID_AUTO, "rx_jabbers", 1713 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 1714 "rx_jabbers"); 1715 1716 SYSCTL_ADD_QUAD(ctx, children, 1717 OID_AUTO, "rx_undersize_packets", 1718 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 1719 "rx_undersize_packets"); 1720 1721 SYSCTL_ADD_QUAD(ctx, children, 1722 OID_AUTO, "rx_fragments", 1723 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 1724 "rx_fragments"); 1725 1726 SYSCTL_ADD_QUAD(ctx, children, 1727 OID_AUTO, "tx_64_byte_packets", 1728 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 1729 
"tx_64_byte_packets"); 1730 1731 SYSCTL_ADD_QUAD(ctx, children, 1732 OID_AUTO, "tx_65_to_127_byte_packets", 1733 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 1734 "tx_65_to_127_byte_packets"); 1735 1736 SYSCTL_ADD_QUAD(ctx, children, 1737 OID_AUTO, "tx_128_to_255_byte_packets", 1738 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 1739 "tx_128_to_255_byte_packets"); 1740 1741 SYSCTL_ADD_QUAD(ctx, children, 1742 OID_AUTO, "tx_256_to_511_byte_packets", 1743 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 1744 "tx_256_to_511_byte_packets"); 1745 1746 SYSCTL_ADD_QUAD(ctx, children, 1747 OID_AUTO, "tx_512_to_1023_byte_packets", 1748 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 1749 "tx_512_to_1023_byte_packets"); 1750 1751 SYSCTL_ADD_QUAD(ctx, children, 1752 OID_AUTO, "tx_1024_to_1518_byte_packets", 1753 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 1754 "tx_1024_to_1518_byte_packets"); 1755 1756 SYSCTL_ADD_QUAD(ctx, children, 1757 OID_AUTO, "tx_1519_to_2047_byte_packets", 1758 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 1759 "tx_1519_to_2047_byte_packets"); 1760 1761 SYSCTL_ADD_QUAD(ctx, children, 1762 OID_AUTO, "tx_2048_to_4095_byte_packets", 1763 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 1764 "tx_2048_to_4095_byte_packets"); 1765 1766 SYSCTL_ADD_QUAD(ctx, children, 1767 OID_AUTO, "tx_4096_to_9216_byte_packets", 1768 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 1769 "tx_4096_to_9216_byte_packets"); 1770 1771 SYSCTL_ADD_QUAD(ctx, children, 1772 OID_AUTO, "tx_9217_to_16383_byte_packets", 1773 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 1774 "tx_9217_to_16383_byte_packets"); 1775 1776 SYSCTL_ADD_QUAD(ctx, children, 1777 OID_AUTO, "tx_pause_frames", 1778 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 1779 "tx_pause_frames"); 1780 1781 SYSCTL_ADD_QUAD(ctx, children, 1782 OID_AUTO, "tx_pfc_frames", 1783 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 1784 "tx_pfc_frames"); 1785 1786 SYSCTL_ADD_QUAD(ctx, children, 1787 OID_AUTO, "tx_lpi_entry_count", 1788 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 1789 "tx_lpi_entry_count"); 1790 1791 SYSCTL_ADD_QUAD(ctx, children, 1792 OID_AUTO, "tx_total_collisions", 1793 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 1794 "tx_total_collisions"); 1795 1796 SYSCTL_ADD_QUAD(ctx, children, 1797 OID_AUTO, "brb_truncates", 1798 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 1799 "brb_truncates"); 1800 1801 SYSCTL_ADD_QUAD(ctx, children, 1802 OID_AUTO, "brb_discards", 1803 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 1804 "brb_discards"); 1805 1806 SYSCTL_ADD_QUAD(ctx, children, 1807 OID_AUTO, "rx_mac_bytes", 1808 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 1809 "rx_mac_bytes"); 1810 1811 SYSCTL_ADD_QUAD(ctx, children, 1812 OID_AUTO, "rx_mac_uc_packets", 1813 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 1814 "rx_mac_uc_packets"); 1815 1816 SYSCTL_ADD_QUAD(ctx, children, 1817 OID_AUTO, "rx_mac_mc_packets", 1818 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 1819 "rx_mac_mc_packets"); 1820 1821 SYSCTL_ADD_QUAD(ctx, children, 1822 OID_AUTO, "rx_mac_bc_packets", 1823 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 1824 "rx_mac_bc_packets"); 1825 1826 SYSCTL_ADD_QUAD(ctx, children, 1827 OID_AUTO, "rx_mac_frames_ok", 1828 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 1829 "rx_mac_frames_ok"); 1830 1831 SYSCTL_ADD_QUAD(ctx, children, 1832 OID_AUTO, "tx_mac_bytes", 1833 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 1834 "tx_mac_bytes"); 1835 1836 SYSCTL_ADD_QUAD(ctx, children, 1837 OID_AUTO, "tx_mac_uc_packets", 1838 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 1839 "tx_mac_uc_packets"); 1840 1841 SYSCTL_ADD_QUAD(ctx, children, 1842 OID_AUTO, "tx_mac_mc_packets", 1843 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 1844 "tx_mac_mc_packets"); 1845 1846 SYSCTL_ADD_QUAD(ctx, children, 1847 OID_AUTO, "tx_mac_bc_packets", 1848 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 1849 "tx_mac_bc_packets"); 1850 1851 SYSCTL_ADD_QUAD(ctx, children, 1852 OID_AUTO, "tx_mac_ctrl_frames", 1853 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 1854 "tx_mac_ctrl_frames"); 1855 return; 1856 } 1857 1858 static void 1859 qlnx_add_sysctls(qlnx_host_t *ha) 1860 { 1861 device_t dev = ha->pci_dev; 1862 struct sysctl_ctx_list *ctx; 1863 struct sysctl_oid_list *children; 1864 1865 ctx = device_get_sysctl_ctx(dev); 1866 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 1867 1868 qlnx_add_fp_stats_sysctls(ha); 1869 qlnx_add_sp_stats_sysctls(ha); 1870 qlnx_add_hw_stats_sysctls(ha); 1871 1872 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 1873 CTLFLAG_RD, qlnx_ver_str, 0, 1874 "Driver Version"); 1875 1876 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 1877 CTLFLAG_RD, ha->stormfw_ver, 0, 1878 "STORM Firmware Version"); 1879 1880 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 1881 CTLFLAG_RD, ha->mfw_ver, 0, 1882 "Management Firmware Version"); 1883 1884 SYSCTL_ADD_UINT(ctx, children, 1885 OID_AUTO, "personality", CTLFLAG_RD, 1886 &ha->personality, ha->personality, 1887 "\tpersonality = 0 => Ethernet Only\n" 1888 "\tpersonality = 3 => Ethernet and RoCE\n" 1889 "\tpersonality = 4 => Ethernet and iWARP\n" 1890 "\tpersonality = 6 => Default in Shared Memory\n"); 1891 1892 ha->dbg_level = 0; 1893 SYSCTL_ADD_UINT(ctx, children, 1894 OID_AUTO, "debug", CTLFLAG_RW, 1895 &ha->dbg_level, ha->dbg_level, "Debug Level"); 1896 1897 ha->dp_level = 0x01; 1898 SYSCTL_ADD_UINT(ctx, children, 1899 OID_AUTO, "dp_level", CTLFLAG_RW, 1900 &ha->dp_level, ha->dp_level, "DP Level"); 1901 1902 ha->dbg_trace_lro_cnt = 0; 1903 SYSCTL_ADD_UINT(ctx, children, 1904 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 1905 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 1906 "Trace LRO Counts"); 1907 1908 ha->dbg_trace_tso_pkt_len = 0; 1909 SYSCTL_ADD_UINT(ctx, children, 1910 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 1911 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 1912 "Trace TSO packet lengths"); 1913 1914 ha->dp_module = 0; 1915 SYSCTL_ADD_UINT(ctx, children, 1916 OID_AUTO, "dp_module", CTLFLAG_RW, 1917 &ha->dp_module, ha->dp_module, "DP Module"); 1918 1919 ha->err_inject = 0; 1920 1921 SYSCTL_ADD_UINT(ctx, children, 1922 OID_AUTO, "err_inject", CTLFLAG_RW, 1923 &ha->err_inject, ha->err_inject, "Error Inject"); 1924 1925 ha->storm_stats_enable = 0; 1926 1927 SYSCTL_ADD_UINT(ctx, children, 1928 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 1929 &ha->storm_stats_enable, ha->storm_stats_enable, 1930 "Enable Storm Statistics Gathering"); 1931 1932 ha->storm_stats_index = 0; 1933 1934 SYSCTL_ADD_UINT(ctx, children, 1935 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 1936 &ha->storm_stats_index, ha->storm_stats_index, 1937 "Enable Storm Statistics Gathering Current Index"); 1938 1939 ha->grcdump_taken = 0; 1940 SYSCTL_ADD_UINT(ctx, children, 1941 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 1942 &ha->grcdump_taken, ha->grcdump_taken, "grcdump_taken"); 1943 1944 
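    /*
     * grcdump_taken (above) and idle_chk_taken (below) are read-only
     * flags that presumably record whether a GRC dump / idle-check
     * capture has been collected (e.g. via the "trigger_dump" sysctl
     * registered further down). Like the other per-device nodes added
     * here, they should appear under the device's sysctl tree
     * (dev.ql.<unit>, given the driver_t name "ql").
     */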
ha->idle_chk_taken = 0; 1945 SYSCTL_ADD_UINT(ctx, children, 1946 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 1947 &ha->idle_chk_taken, ha->idle_chk_taken, "idle_chk_taken"); 1948 1949 SYSCTL_ADD_UINT(ctx, children, 1950 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 1951 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 1952 "rx_coalesce_usecs"); 1953 1954 SYSCTL_ADD_UINT(ctx, children, 1955 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 1956 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 1957 "tx_coalesce_usecs"); 1958 1959 ha->rx_pkt_threshold = 128; 1960 SYSCTL_ADD_UINT(ctx, children, 1961 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 1962 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 1963 "No. of Rx Pkts to process at a time"); 1964 1965 ha->rx_jumbo_buf_eq_mtu = 0; 1966 SYSCTL_ADD_UINT(ctx, children, 1967 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 1968 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 1969 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 1970 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 1971 1972 SYSCTL_ADD_PROC(ctx, children, 1973 OID_AUTO, "trigger_dump", CTLTYPE_INT | CTLFLAG_RW, 1974 (void *)ha, 0, 1975 qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 1976 1977 SYSCTL_ADD_PROC(ctx, children, 1978 OID_AUTO, "set_rx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1979 (void *)ha, 0, 1980 qlnx_set_rx_coalesce, "I", 1981 "rx interrupt coalesce period microseconds"); 1982 1983 SYSCTL_ADD_PROC(ctx, children, 1984 OID_AUTO, "set_tx_coalesce_usecs", CTLTYPE_INT | CTLFLAG_RW, 1985 (void *)ha, 0, 1986 qlnx_set_tx_coalesce, "I", 1987 "tx interrupt coalesce period microseconds"); 1988 1989 SYSCTL_ADD_QUAD(ctx, children, 1990 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 1991 &ha->err_illegal_intr, "err_illegal_intr"); 1992 1993 SYSCTL_ADD_QUAD(ctx, children, 1994 OID_AUTO, "err_fp_null", CTLFLAG_RD, 1995 &ha->err_fp_null, "err_fp_null"); 1996 1997 SYSCTL_ADD_QUAD(ctx, children, 1998 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 1999 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2000 return; 2001 } 2002 2003 2004 2005 /***************************************************************************** 2006 * Operating System Network Interface Functions 2007 *****************************************************************************/ 2008 2009 static void 2010 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2011 { 2012 uint16_t device_id; 2013 struct ifnet *ifp; 2014 2015 ifp = ha->ifp = if_alloc(IFT_ETHER); 2016 2017 if (ifp == NULL) 2018 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2019 2020 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2021 2022 device_id = pci_get_device(ha->pci_dev); 2023 2024 #if __FreeBSD_version >= 1000000 2025 2026 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2027 ifp->if_baudrate = IF_Gbps(40); 2028 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2029 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2030 ifp->if_baudrate = IF_Gbps(25); 2031 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2032 ifp->if_baudrate = IF_Gbps(50); 2033 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2034 ifp->if_baudrate = IF_Gbps(100); 2035 2036 ifp->if_capabilities = IFCAP_LINKSTATE; 2037 #else 2038 ifp->if_mtu = ETHERMTU; 2039 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 2040 2041 #endif /* #if __FreeBSD_version >= 1000000 */ 2042 2043 ifp->if_init = qlnx_init; 2044 ifp->if_softc = ha; 2045 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2046 ifp->if_ioctl = qlnx_ioctl; 2047 ifp->if_transmit = qlnx_transmit; 2048 
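    /*
     * if_transmit (above) and if_qflush (below) provide the multiqueue
     * transmit path: broadly, qlnx_transmit() dispatches packets to the
     * per-queue buf_rings (fp->tx_br), qlnx_transmit_locked() drains
     * them, and qlnx_qflush() discards whatever is still queued, in
     * place of the legacy if_start/if_snd model.
     */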
ifp->if_qflush = qlnx_qflush; 2049 2050 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 2051 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 2052 IFQ_SET_READY(&ifp->if_snd); 2053 2054 #if __FreeBSD_version >= 1100036 2055 if_setgetcounterfn(ifp, qlnx_get_counter); 2056 #endif 2057 2058 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2059 2060 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2061 ether_ifattach(ifp, ha->primary_mac); 2062 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2063 2064 ifp->if_capabilities = IFCAP_HWCSUM; 2065 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2066 2067 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2068 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 2069 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2070 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2071 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 2072 ifp->if_capabilities |= IFCAP_TSO4; 2073 ifp->if_capabilities |= IFCAP_TSO6; 2074 ifp->if_capabilities |= IFCAP_LRO; 2075 2076 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE - 2077 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2078 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */; 2079 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE; 2080 2081 2082 ifp->if_capenable = ifp->if_capabilities; 2083 2084 ifp->if_hwassist = CSUM_IP; 2085 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 2086 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 2087 ifp->if_hwassist |= CSUM_TSO; 2088 2089 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2090 2091 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2092 qlnx_media_status); 2093 2094 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2095 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2096 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2097 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2098 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2099 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2100 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2101 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2102 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2103 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2104 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2105 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2106 ifmedia_add(&ha->media, 2107 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2108 ifmedia_add(&ha->media, 2109 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2110 ifmedia_add(&ha->media, 2111 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2112 } 2113 2114 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2115 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2116 2117 2118 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2119 2120 QL_DPRINT2(ha, "exit\n"); 2121 2122 return; 2123 } 2124 2125 static void 2126 qlnx_init_locked(qlnx_host_t *ha) 2127 { 2128 struct ifnet *ifp = ha->ifp; 2129 2130 QL_DPRINT1(ha, "Driver Initialization start \n"); 2131 2132 qlnx_stop(ha); 2133 2134 if (qlnx_load(ha) == 0) { 2135 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2136 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2137 } 2138 2139 return; 2140 } 2141 2142 static void 2143 qlnx_init(void *arg) 2144 { 2145 qlnx_host_t *ha; 2146 2147 ha = (qlnx_host_t *)arg; 2148 2149 QL_DPRINT2(ha, "enter\n"); 2150 2151 QLNX_LOCK(ha); 2152 qlnx_init_locked(ha); 2153 QLNX_UNLOCK(ha); 2154 2155 QL_DPRINT2(ha, "exit\n"); 2156 2157 return; 2158 } 2159 2160 static int 2161 qlnx_config_mcast_mac_addr(qlnx_host_t 
*ha, uint8_t *mac_addr, uint32_t add_mac) 2162 { 2163 struct ecore_filter_mcast *mcast; 2164 struct ecore_dev *cdev; 2165 int rc; 2166 2167 cdev = &ha->cdev; 2168 2169 mcast = &ha->ecore_mcast; 2170 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2171 2172 if (add_mac) 2173 mcast->opcode = ECORE_FILTER_ADD; 2174 else 2175 mcast->opcode = ECORE_FILTER_REMOVE; 2176 2177 mcast->num_mc_addrs = 1; 2178 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2179 2180 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2181 2182 return (rc); 2183 } 2184 2185 static int 2186 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2187 { 2188 int i; 2189 2190 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2191 2192 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2193 return 0; /* its been already added */ 2194 } 2195 2196 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2197 2198 if ((ha->mcast[i].addr[0] == 0) && 2199 (ha->mcast[i].addr[1] == 0) && 2200 (ha->mcast[i].addr[2] == 0) && 2201 (ha->mcast[i].addr[3] == 0) && 2202 (ha->mcast[i].addr[4] == 0) && 2203 (ha->mcast[i].addr[5] == 0)) { 2204 2205 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2206 return (-1); 2207 2208 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2209 ha->nmcast++; 2210 2211 return 0; 2212 } 2213 } 2214 return 0; 2215 } 2216 2217 static int 2218 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2219 { 2220 int i; 2221 2222 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2223 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2224 2225 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2226 return (-1); 2227 2228 ha->mcast[i].addr[0] = 0; 2229 ha->mcast[i].addr[1] = 0; 2230 ha->mcast[i].addr[2] = 0; 2231 ha->mcast[i].addr[3] = 0; 2232 ha->mcast[i].addr[4] = 0; 2233 ha->mcast[i].addr[5] = 0; 2234 2235 ha->nmcast--; 2236 2237 return 0; 2238 } 2239 } 2240 return 0; 2241 } 2242 2243 /* 2244 * Name: qls_hw_set_multi 2245 * Function: Sets the Multicast Addresses provided the host O.S into the 2246 * hardware (for the given interface) 2247 */ 2248 static void 2249 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2250 uint32_t add_mac) 2251 { 2252 int i; 2253 2254 for (i = 0; i < mcnt; i++) { 2255 if (add_mac) { 2256 if (qlnx_hw_add_mcast(ha, mta)) 2257 break; 2258 } else { 2259 if (qlnx_hw_del_mcast(ha, mta)) 2260 break; 2261 } 2262 2263 mta += ETHER_HDR_LEN; 2264 } 2265 return; 2266 } 2267 2268 2269 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN) 2270 static int 2271 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2272 { 2273 uint8_t mta[QLNX_MCAST_ADDRS_SIZE]; 2274 struct ifmultiaddr *ifma; 2275 int mcnt = 0; 2276 struct ifnet *ifp = ha->ifp; 2277 int ret = 0; 2278 2279 if_maddr_rlock(ifp); 2280 2281 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2282 2283 if (ifma->ifma_addr->sa_family != AF_LINK) 2284 continue; 2285 2286 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2287 break; 2288 2289 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2290 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2291 2292 mcnt++; 2293 } 2294 2295 if_maddr_runlock(ifp); 2296 2297 QLNX_LOCK(ha); 2298 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2299 QLNX_UNLOCK(ha); 2300 2301 return (ret); 2302 } 2303 2304 static int 2305 qlnx_set_promisc(qlnx_host_t *ha) 2306 { 2307 int rc = 0; 2308 uint8_t filter; 2309 2310 filter = ha->filter; 2311 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2312 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2313 2314 rc = qlnx_set_rx_accept_filter(ha, filter); 2315 return (rc); 2316 } 2317 2318 static int 
2319 qlnx_set_allmulti(qlnx_host_t *ha) 2320 { 2321 int rc = 0; 2322 uint8_t filter; 2323 2324 filter = ha->filter; 2325 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2326 rc = qlnx_set_rx_accept_filter(ha, filter); 2327 2328 return (rc); 2329 } 2330 2331 2332 static int 2333 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2334 { 2335 int ret = 0, mask; 2336 struct ifreq *ifr = (struct ifreq *)data; 2337 struct ifaddr *ifa = (struct ifaddr *)data; 2338 qlnx_host_t *ha; 2339 2340 ha = (qlnx_host_t *)ifp->if_softc; 2341 2342 switch (cmd) { 2343 case SIOCSIFADDR: 2344 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2345 2346 if (ifa->ifa_addr->sa_family == AF_INET) { 2347 ifp->if_flags |= IFF_UP; 2348 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2349 QLNX_LOCK(ha); 2350 qlnx_init_locked(ha); 2351 QLNX_UNLOCK(ha); 2352 } 2353 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2354 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2355 2356 arp_ifinit(ifp, ifa); 2357 } else { 2358 ether_ioctl(ifp, cmd, data); 2359 } 2360 break; 2361 2362 case SIOCSIFMTU: 2363 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2364 2365 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2366 ret = EINVAL; 2367 } else { 2368 QLNX_LOCK(ha); 2369 ifp->if_mtu = ifr->ifr_mtu; 2370 ha->max_frame_size = 2371 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2372 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2373 qlnx_init_locked(ha); 2374 } 2375 2376 QLNX_UNLOCK(ha); 2377 } 2378 2379 break; 2380 2381 case SIOCSIFFLAGS: 2382 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2383 2384 QLNX_LOCK(ha); 2385 2386 if (ifp->if_flags & IFF_UP) { 2387 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2388 if ((ifp->if_flags ^ ha->if_flags) & 2389 IFF_PROMISC) { 2390 ret = qlnx_set_promisc(ha); 2391 } else if ((ifp->if_flags ^ ha->if_flags) & 2392 IFF_ALLMULTI) { 2393 ret = qlnx_set_allmulti(ha); 2394 } 2395 } else { 2396 ha->max_frame_size = ifp->if_mtu + 2397 ETHER_HDR_LEN + ETHER_CRC_LEN; 2398 qlnx_init_locked(ha); 2399 } 2400 } else { 2401 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2402 qlnx_stop(ha); 2403 ha->if_flags = ifp->if_flags; 2404 } 2405 2406 QLNX_UNLOCK(ha); 2407 break; 2408 2409 case SIOCADDMULTI: 2410 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2411 2412 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2413 if (qlnx_set_multi(ha, 1)) 2414 ret = EINVAL; 2415 } 2416 break; 2417 2418 case SIOCDELMULTI: 2419 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2420 2421 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2422 if (qlnx_set_multi(ha, 0)) 2423 ret = EINVAL; 2424 } 2425 break; 2426 2427 case SIOCSIFMEDIA: 2428 case SIOCGIFMEDIA: 2429 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2430 2431 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2432 break; 2433 2434 case SIOCSIFCAP: 2435 2436 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2437 2438 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2439 2440 if (mask & IFCAP_HWCSUM) 2441 ifp->if_capenable ^= IFCAP_HWCSUM; 2442 if (mask & IFCAP_TSO4) 2443 ifp->if_capenable ^= IFCAP_TSO4; 2444 if (mask & IFCAP_TSO6) 2445 ifp->if_capenable ^= IFCAP_TSO6; 2446 if (mask & IFCAP_VLAN_HWTAGGING) 2447 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2448 if (mask & IFCAP_VLAN_HWTSO) 2449 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2450 if (mask & IFCAP_LRO) 2451 ifp->if_capenable ^= IFCAP_LRO; 2452 2453 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) 2454 qlnx_init(ha); 2455 2456 VLAN_CAPABILITIES(ifp); 2457 break; 2458 2459 #if (__FreeBSD_version >= 1100101) 2460 2461 case SIOCGI2C: 2462 { 2463 struct ifi2creq i2c; 2464 struct 
ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2465 struct ecore_ptt *p_ptt; 2466 2467 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2468 2469 if (ret) 2470 break; 2471 2472 if ((i2c.len > sizeof (i2c.data)) || 2473 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2474 ret = EINVAL; 2475 break; 2476 } 2477 2478 p_ptt = ecore_ptt_acquire(p_hwfn); 2479 2480 if (!p_ptt) { 2481 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2482 ret = -1; 2483 break; 2484 } 2485 2486 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2487 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2488 i2c.len, &i2c.data[0]); 2489 2490 ecore_ptt_release(p_hwfn, p_ptt); 2491 2492 if (ret) { 2493 ret = -1; 2494 break; 2495 } 2496 2497 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2498 2499 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2500 len = %d addr = 0x%02x offset = 0x%04x \ 2501 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2502 0x%02x 0x%02x 0x%02x\n", 2503 ret, i2c.len, i2c.dev_addr, i2c.offset, 2504 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2505 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2506 break; 2507 } 2508 #endif /* #if (__FreeBSD_version >= 1100101) */ 2509 2510 default: 2511 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2512 ret = ether_ioctl(ifp, cmd, data); 2513 break; 2514 } 2515 2516 return (ret); 2517 } 2518 2519 static int 2520 qlnx_media_change(struct ifnet *ifp) 2521 { 2522 qlnx_host_t *ha; 2523 struct ifmedia *ifm; 2524 int ret = 0; 2525 2526 ha = (qlnx_host_t *)ifp->if_softc; 2527 2528 QL_DPRINT2(ha, "enter\n"); 2529 2530 ifm = &ha->media; 2531 2532 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2533 ret = EINVAL; 2534 2535 QL_DPRINT2(ha, "exit\n"); 2536 2537 return (ret); 2538 } 2539 2540 static void 2541 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2542 { 2543 qlnx_host_t *ha; 2544 2545 ha = (qlnx_host_t *)ifp->if_softc; 2546 2547 QL_DPRINT2(ha, "enter\n"); 2548 2549 ifmr->ifm_status = IFM_AVALID; 2550 ifmr->ifm_active = IFM_ETHER; 2551 2552 if (ha->link_up) { 2553 ifmr->ifm_status |= IFM_ACTIVE; 2554 ifmr->ifm_active |= 2555 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2556 2557 if (ha->if_link.link_partner_caps & 2558 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2559 ifmr->ifm_active |= 2560 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2561 } 2562 2563 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2564 2565 return; 2566 } 2567 2568 2569 static void 2570 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2571 struct qlnx_tx_queue *txq) 2572 { 2573 u16 idx; 2574 struct mbuf *mp; 2575 bus_dmamap_t map; 2576 int i; 2577 struct eth_tx_bd *tx_data_bd; 2578 struct eth_tx_1st_bd *first_bd; 2579 int nbds = 0; 2580 2581 idx = txq->sw_tx_cons; 2582 mp = txq->sw_tx_ring[idx].mp; 2583 map = txq->sw_tx_ring[idx].map; 2584 2585 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2586 2587 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2588 2589 QL_DPRINT1(ha, "(mp == NULL) " 2590 " tx_idx = 0x%x" 2591 " ecore_prod_idx = 0x%x" 2592 " ecore_cons_idx = 0x%x" 2593 " hw_bd_cons = 0x%x" 2594 " txq_db_last = 0x%x" 2595 " elem_left = 0x%x\n", 2596 fp->rss_id, 2597 ecore_chain_get_prod_idx(&txq->tx_pbl), 2598 ecore_chain_get_cons_idx(&txq->tx_pbl), 2599 le16toh(*txq->hw_cons_ptr), 2600 txq->tx_db.raw, 2601 ecore_chain_get_elem_left(&txq->tx_pbl)); 2602 2603 fp->err_tx_free_pkt_null++; 2604 2605 //DEBUG 2606 qlnx_trigger_dump(ha); 2607 2608 return; 2609 } else { 2610 2611 QLNX_INC_OPACKETS((ha->ifp)); 2612 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2613 2614 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2615 bus_dmamap_unload(ha->tx_tag, map); 2616 2617 fp->tx_pkts_freed++; 2618 fp->tx_pkts_completed++; 2619 2620 m_freem(mp); 2621 } 2622 2623 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2624 nbds = first_bd->data.nbds; 2625 2626 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2627 2628 for (i = 1; i < nbds; i++) { 2629 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 2630 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2631 } 2632 txq->sw_tx_ring[idx].flags = 0; 2633 txq->sw_tx_ring[idx].mp = NULL; 2634 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2635 2636 return; 2637 } 2638 2639 static void 2640 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2641 struct qlnx_tx_queue *txq) 2642 { 2643 u16 hw_bd_cons; 2644 u16 ecore_cons_idx; 2645 uint16_t diff; 2646 uint16_t idx, idx2; 2647 2648 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2649 2650 while (hw_bd_cons != 2651 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2652 2653 if (hw_bd_cons < ecore_cons_idx) { 2654 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2655 } else { 2656 diff = hw_bd_cons - ecore_cons_idx; 2657 } 2658 if ((diff > TX_RING_SIZE) || 2659 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2660 2661 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2662 2663 QL_DPRINT1(ha, "(diff = 0x%x) " 2664 " tx_idx = 0x%x" 2665 " ecore_prod_idx = 0x%x" 2666 " ecore_cons_idx = 0x%x" 2667 " hw_bd_cons = 0x%x" 2668 " txq_db_last = 0x%x" 2669 " elem_left = 0x%x\n", 2670 diff, 2671 fp->rss_id, 2672 ecore_chain_get_prod_idx(&txq->tx_pbl), 2673 ecore_chain_get_cons_idx(&txq->tx_pbl), 2674 le16toh(*txq->hw_cons_ptr), 2675 txq->tx_db.raw, 2676 ecore_chain_get_elem_left(&txq->tx_pbl)); 2677 2678 fp->err_tx_cons_idx_conflict++; 2679 2680 //DEBUG 2681 qlnx_trigger_dump(ha); 2682 } 2683 2684 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2685 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 2686 prefetch(txq->sw_tx_ring[idx].mp); 2687 prefetch(txq->sw_tx_ring[idx2].mp); 2688 2689 qlnx_free_tx_pkt(ha, fp, txq); 2690 2691 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2692 } 2693 return; 2694 } 2695 2696 static int 2697 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp) 2698 { 2699 int ret = 0; 2700 struct 
qlnx_tx_queue *txq; 2701 qlnx_host_t * ha; 2702 uint16_t elem_left; 2703 2704 txq = fp->txq[0]; 2705 ha = (qlnx_host_t *)fp->edev; 2706 2707 2708 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) { 2709 if(mp != NULL) 2710 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2711 return (ret); 2712 } 2713 2714 if(mp != NULL) 2715 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2716 2717 mp = drbr_peek(ifp, fp->tx_br); 2718 2719 while (mp != NULL) { 2720 2721 if (qlnx_send(ha, fp, &mp)) { 2722 2723 if (mp != NULL) { 2724 drbr_putback(ifp, fp->tx_br, mp); 2725 } else { 2726 fp->tx_pkts_processed++; 2727 drbr_advance(ifp, fp->tx_br); 2728 } 2729 goto qlnx_transmit_locked_exit; 2730 2731 } else { 2732 drbr_advance(ifp, fp->tx_br); 2733 fp->tx_pkts_transmitted++; 2734 fp->tx_pkts_processed++; 2735 } 2736 2737 mp = drbr_peek(ifp, fp->tx_br); 2738 } 2739 2740 qlnx_transmit_locked_exit: 2741 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 2742 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 2743 < QLNX_TX_ELEM_MAX_THRESH)) 2744 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 2745 2746 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 2747 return ret; 2748 } 2749 2750 2751 static int 2752 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 2753 { 2754 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 2755 struct qlnx_fastpath *fp; 2756 int rss_id = 0, ret = 0; 2757 2758 #ifdef QLNX_TRACEPERF_DATA 2759 uint64_t tx_pkts = 0, tx_compl = 0; 2760 #endif 2761 2762 QL_DPRINT2(ha, "enter\n"); 2763 2764 #if __FreeBSD_version >= 1100000 2765 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 2766 #else 2767 if (mp->m_flags & M_FLOWID) 2768 #endif 2769 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 2770 ha->num_rss; 2771 2772 fp = &ha->fp_array[rss_id]; 2773 2774 if (fp->tx_br == NULL) { 2775 ret = EINVAL; 2776 goto qlnx_transmit_exit; 2777 } 2778 2779 if (mtx_trylock(&fp->tx_mtx)) { 2780 2781 #ifdef QLNX_TRACEPERF_DATA 2782 tx_pkts = fp->tx_pkts_transmitted; 2783 tx_compl = fp->tx_pkts_completed; 2784 #endif 2785 2786 ret = qlnx_transmit_locked(ifp, fp, mp); 2787 2788 #ifdef QLNX_TRACEPERF_DATA 2789 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 2790 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 2791 #endif 2792 mtx_unlock(&fp->tx_mtx); 2793 } else { 2794 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 2795 ret = drbr_enqueue(ifp, fp->tx_br, mp); 2796 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 2797 } 2798 } 2799 2800 qlnx_transmit_exit: 2801 2802 QL_DPRINT2(ha, "exit ret = %d\n", ret); 2803 return ret; 2804 } 2805 2806 static void 2807 qlnx_qflush(struct ifnet *ifp) 2808 { 2809 int rss_id; 2810 struct qlnx_fastpath *fp; 2811 struct mbuf *mp; 2812 qlnx_host_t *ha; 2813 2814 ha = (qlnx_host_t *)ifp->if_softc; 2815 2816 QL_DPRINT2(ha, "enter\n"); 2817 2818 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 2819 2820 fp = &ha->fp_array[rss_id]; 2821 2822 if (fp == NULL) 2823 continue; 2824 2825 if (fp->tx_br) { 2826 mtx_lock(&fp->tx_mtx); 2827 2828 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 2829 fp->tx_pkts_freed++; 2830 m_freem(mp); 2831 } 2832 mtx_unlock(&fp->tx_mtx); 2833 } 2834 } 2835 QL_DPRINT2(ha, "exit\n"); 2836 2837 return; 2838 } 2839 2840 static void 2841 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 2842 { 2843 struct ecore_dev *cdev; 2844 uint32_t offset; 2845 2846 cdev = &ha->cdev; 2847 2848 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)cdev->doorbells); 2849 2850 bus_write_4(ha->pci_dbells, 
offset, value); 2851 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 2852 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 2853 2854 return; 2855 } 2856 2857 static uint32_t 2858 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 2859 { 2860 struct ether_vlan_header *eh = NULL; 2861 struct ip *ip = NULL; 2862 struct ip6_hdr *ip6 = NULL; 2863 struct tcphdr *th = NULL; 2864 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 2865 uint16_t etype = 0; 2866 device_t dev; 2867 uint8_t buf[sizeof(struct ip6_hdr)]; 2868 2869 dev = ha->pci_dev; 2870 2871 eh = mtod(mp, struct ether_vlan_header *); 2872 2873 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2874 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2875 etype = ntohs(eh->evl_proto); 2876 } else { 2877 ehdrlen = ETHER_HDR_LEN; 2878 etype = ntohs(eh->evl_encap_proto); 2879 } 2880 2881 switch (etype) { 2882 2883 case ETHERTYPE_IP: 2884 ip = (struct ip *)(mp->m_data + ehdrlen); 2885 2886 ip_hlen = sizeof (struct ip); 2887 2888 if (mp->m_len < (ehdrlen + ip_hlen)) { 2889 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 2890 ip = (struct ip *)buf; 2891 } 2892 2893 th = (struct tcphdr *)(ip + 1); 2894 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2895 break; 2896 2897 case ETHERTYPE_IPV6: 2898 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 2899 2900 ip_hlen = sizeof(struct ip6_hdr); 2901 2902 if (mp->m_len < (ehdrlen + ip_hlen)) { 2903 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 2904 buf); 2905 ip6 = (struct ip6_hdr *)buf; 2906 } 2907 th = (struct tcphdr *)(ip6 + 1); 2908 offset = ip_hlen + ehdrlen + (th->th_off << 2); 2909 break; 2910 2911 default: 2912 break; 2913 } 2914 2915 return (offset); 2916 } 2917 2918 static __inline int 2919 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 2920 uint32_t offset) 2921 { 2922 int i; 2923 uint32_t sum, nbds_in_hdr = 1; 2924 uint32_t window; 2925 bus_dma_segment_t *s_seg; 2926 2927 /* If the header spans mulitple segments, skip those segments */ 2928 2929 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 2930 return (0); 2931 2932 i = 0; 2933 2934 while ((i < nsegs) && (offset >= segs->ds_len)) { 2935 offset = offset - segs->ds_len; 2936 segs++; 2937 i++; 2938 nbds_in_hdr++; 2939 } 2940 2941 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 2942 2943 nsegs = nsegs - i; 2944 2945 while (nsegs >= window) { 2946 2947 sum = 0; 2948 s_seg = segs; 2949 2950 for (i = 0; i < window; i++){ 2951 sum += s_seg->ds_len; 2952 s_seg++; 2953 } 2954 2955 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 2956 fp->tx_lso_wnd_min_len++; 2957 return (-1); 2958 } 2959 2960 nsegs = nsegs - 1; 2961 segs++; 2962 } 2963 2964 return (0); 2965 } 2966 2967 static int 2968 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 2969 { 2970 bus_dma_segment_t *segs; 2971 bus_dmamap_t map = 0; 2972 uint32_t nsegs = 0; 2973 int ret = -1; 2974 struct mbuf *m_head = *m_headp; 2975 uint16_t idx = 0; 2976 uint16_t elem_left; 2977 2978 uint8_t nbd = 0; 2979 struct qlnx_tx_queue *txq; 2980 2981 struct eth_tx_1st_bd *first_bd; 2982 struct eth_tx_2nd_bd *second_bd; 2983 struct eth_tx_3rd_bd *third_bd; 2984 struct eth_tx_bd *tx_data_bd; 2985 2986 int seg_idx = 0; 2987 uint32_t nbds_in_hdr = 0; 2988 uint32_t offset = 0; 2989 2990 #ifdef QLNX_TRACE_PERF_DATA 2991 uint16_t bd_used; 2992 #endif 2993 2994 QL_DPRINT8(ha, "enter\n"); 2995 2996 if (!ha->link_up) 2997 return (-1); 2998 2999 first_bd = NULL; 3000 second_bd = NULL; 3001 third_bd = NULL; 3002 tx_data_bd = NULL; 3003 3004 txq = fp->txq[0]; 3005 
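	/*
	 * Bail out early if the TX descriptor chain is nearly full;
	 * otherwise DMA-map the mbuf chain, falling back to m_defrag()
	 * when the mapping fails with EFBIG or produces more segments
	 * than a single packet's BDs can describe.
	 */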
3006 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3007 QLNX_TX_ELEM_MIN_THRESH) { 3008 3009 fp->tx_nsegs_gt_elem_left++; 3010 fp->err_tx_nsegs_gt_elem_left++; 3011 3012 return (ENOBUFS); 3013 } 3014 3015 idx = txq->sw_tx_prod; 3016 3017 map = txq->sw_tx_ring[idx].map; 3018 segs = txq->segs; 3019 3020 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3021 BUS_DMA_NOWAIT); 3022 3023 if (ha->dbg_trace_tso_pkt_len) { 3024 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3025 if (!fp->tx_tso_min_pkt_len) { 3026 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3027 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3028 } else { 3029 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3030 fp->tx_tso_min_pkt_len = 3031 m_head->m_pkthdr.len; 3032 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3033 fp->tx_tso_max_pkt_len = 3034 m_head->m_pkthdr.len; 3035 } 3036 } 3037 } 3038 3039 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3040 offset = qlnx_tcp_offset(ha, m_head); 3041 3042 if ((ret == EFBIG) || 3043 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3044 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3045 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3046 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3047 3048 struct mbuf *m; 3049 3050 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3051 3052 fp->tx_defrag++; 3053 3054 m = m_defrag(m_head, M_NOWAIT); 3055 if (m == NULL) { 3056 fp->err_tx_defrag++; 3057 fp->tx_pkts_freed++; 3058 m_freem(m_head); 3059 *m_headp = NULL; 3060 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3061 return (ENOBUFS); 3062 } 3063 3064 m_head = m; 3065 *m_headp = m_head; 3066 3067 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3068 segs, &nsegs, BUS_DMA_NOWAIT))) { 3069 3070 fp->err_tx_defrag_dmamap_load++; 3071 3072 QL_DPRINT1(ha, 3073 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3074 ret, m_head->m_pkthdr.len); 3075 3076 fp->tx_pkts_freed++; 3077 m_freem(m_head); 3078 *m_headp = NULL; 3079 3080 return (ret); 3081 } 3082 3083 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3084 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3085 3086 fp->err_tx_non_tso_max_seg++; 3087 3088 QL_DPRINT1(ha, 3089 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3090 ret, nsegs, m_head->m_pkthdr.len); 3091 3092 fp->tx_pkts_freed++; 3093 m_freem(m_head); 3094 *m_headp = NULL; 3095 3096 return (ret); 3097 } 3098 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3099 offset = qlnx_tcp_offset(ha, m_head); 3100 3101 } else if (ret) { 3102 3103 fp->err_tx_dmamap_load++; 3104 3105 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3106 ret, m_head->m_pkthdr.len); 3107 fp->tx_pkts_freed++; 3108 m_freem(m_head); 3109 *m_headp = NULL; 3110 return (ret); 3111 } 3112 3113 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3114 3115 if (ha->dbg_trace_tso_pkt_len) { 3116 if (nsegs < QLNX_FP_MAX_SEGS) 3117 fp->tx_pkts[(nsegs - 1)]++; 3118 else 3119 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3120 } 3121 3122 #ifdef QLNX_TRACE_PERF_DATA 3123 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3124 if(m_head->m_pkthdr.len <= 2048) 3125 fp->tx_pkts_hist[0]++; 3126 else if((m_head->m_pkthdr.len > 2048) && 3127 (m_head->m_pkthdr.len <= 4096)) 3128 fp->tx_pkts_hist[1]++; 3129 else if((m_head->m_pkthdr.len > 4096) && 3130 (m_head->m_pkthdr.len <= 8192)) 3131 fp->tx_pkts_hist[2]++; 3132 else if((m_head->m_pkthdr.len > 8192) && 3133 (m_head->m_pkthdr.len <= 12288 )) 3134 fp->tx_pkts_hist[3]++; 3135 else if((m_head->m_pkthdr.len > 11288) && 3136 (m_head->m_pkthdr.len <= 16394)) 3137 
fp->tx_pkts_hist[4]++;
3138 else if((m_head->m_pkthdr.len > 16384) &&
3139 (m_head->m_pkthdr.len <= 20480))
3140 fp->tx_pkts_hist[5]++;
3141 else if((m_head->m_pkthdr.len > 20480) &&
3142 (m_head->m_pkthdr.len <= 24576))
3143 fp->tx_pkts_hist[6]++;
3144 else if((m_head->m_pkthdr.len > 24576) &&
3145 (m_head->m_pkthdr.len <= 28672))
3146 fp->tx_pkts_hist[7]++;
3147 else if((m_head->m_pkthdr.len > 28672) &&
3148 (m_head->m_pkthdr.len <= 32768))
3149 fp->tx_pkts_hist[8]++;
3150 else if((m_head->m_pkthdr.len > 32768) &&
3151 (m_head->m_pkthdr.len <= 36864))
3152 fp->tx_pkts_hist[9]++;
3153 else if((m_head->m_pkthdr.len > 36864) &&
3154 (m_head->m_pkthdr.len <= 40960))
3155 fp->tx_pkts_hist[10]++;
3156 else if((m_head->m_pkthdr.len > 40960) &&
3157 (m_head->m_pkthdr.len <= 45056))
3158 fp->tx_pkts_hist[11]++;
3159 else if((m_head->m_pkthdr.len > 45056) &&
3160 (m_head->m_pkthdr.len <= 49152))
3161 fp->tx_pkts_hist[12]++;
3162 else if((m_head->m_pkthdr.len > 49152) &&
3163 (m_head->m_pkthdr.len <= 53248))
3164 fp->tx_pkts_hist[13]++;
3165 else if((m_head->m_pkthdr.len > 53248) &&
3166 (m_head->m_pkthdr.len <= 57344))
3167 fp->tx_pkts_hist[14]++;
3168 else if((m_head->m_pkthdr.len > 57344) &&
3169 (m_head->m_pkthdr.len <= 61440))
3170 fp->tx_pkts_hist[15]++;
3171 else if((m_head->m_pkthdr.len > 61440) &&
3172 (m_head->m_pkthdr.len <= 65536))
3173 fp->tx_pkts_hist[16]++;
3174 else
3175 fp->tx_pkts_hist[17]++;
3176 }
3177
3178 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
3179
3180 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl);
3181 bd_used = TX_RING_SIZE - elem_left;
3182
3183 if(bd_used <= 100)
3184 fp->tx_pkts_q[0]++;
3185 else if((bd_used > 100) && (bd_used <= 500))
3186 fp->tx_pkts_q[1]++;
3187 else if((bd_used > 500) && (bd_used <= 1000))
3188 fp->tx_pkts_q[2]++;
3189 else if((bd_used > 1000) && (bd_used <= 2000))
3190 fp->tx_pkts_q[3]++;
3191 else if((bd_used > 2000) && (bd_used <= 4000))
3192 fp->tx_pkts_q[4]++;
3193 else if((bd_used > 4000) && (bd_used <= 5000))
3194 fp->tx_pkts_q[5]++;
3195 else if((bd_used > 5000) && (bd_used <= 7000))
3196 fp->tx_pkts_q[6]++;
3197 else if((bd_used > 7000) && (bd_used <= 8000))
3198 fp->tx_pkts_q[7]++;
3199 else if((bd_used > 8000) && (bd_used <= 9000))
3200 fp->tx_pkts_q[8]++;
3201 else if((bd_used > 9000) && (bd_used <= 10000))
3202 fp->tx_pkts_q[9]++;
3203 else if((bd_used > 10000) && (bd_used <= 11000))
3204 fp->tx_pkts_q[10]++;
3205 else if((bd_used > 11000) && (bd_used <= 12000))
3206 fp->tx_pkts_q[11]++;
3207 else if((bd_used > 12000) && (bd_used <= 13000))
3208 fp->tx_pkts_q[12]++;
3209 else if((bd_used > 13000) && (bd_used <= 14000))
3210 fp->tx_pkts_q[13]++;
3211 else if((bd_used > 14000) && (bd_used <= 15000))
3212 fp->tx_pkts_q[14]++;
3213 else if((bd_used > 15000) && (bd_used <= 16000))
3214 fp->tx_pkts_q[15]++;
3215 else
3216 fp->tx_pkts_q[16]++;
3217 }
3218
3219 #endif /* end of QLNX_TRACE_PERF_DATA */
3220
3221 if ((nsegs + QLNX_TX_ELEM_RESERVE) >
3222 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) {
3223
3224 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs"
3225 " in chain[%d] trying to free packets\n",
3226 nsegs, elem_left, fp->rss_id);
3227
3228 fp->tx_nsegs_gt_elem_left++;
3229
3230 (void)qlnx_tx_int(ha, fp, txq);
3231
3232 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left =
3233 ecore_chain_get_elem_left(&txq->tx_pbl))) {
3234
3235 QL_DPRINT1(ha,
3236 "(%d, 0x%x) insufficient BDs in chain[%d]\n",
3237 nsegs, elem_left, fp->rss_id);
3238
3239 fp->err_tx_nsegs_gt_elem_left++;
3240 fp->tx_ring_full = 1;
3241 if
(ha->storm_stats_enable) 3242 ha->storm_stats_gather = 1; 3243 return (ENOBUFS); 3244 } 3245 } 3246 3247 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3248 3249 txq->sw_tx_ring[idx].mp = m_head; 3250 3251 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3252 3253 memset(first_bd, 0, sizeof(*first_bd)); 3254 3255 first_bd->data.bd_flags.bitfields = 3256 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3257 3258 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3259 3260 nbd++; 3261 3262 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3263 first_bd->data.bd_flags.bitfields |= 3264 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3265 } 3266 3267 if (m_head->m_pkthdr.csum_flags & 3268 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3269 first_bd->data.bd_flags.bitfields |= 3270 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3271 } 3272 3273 if (m_head->m_flags & M_VLANTAG) { 3274 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3275 first_bd->data.bd_flags.bitfields |= 3276 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3277 } 3278 3279 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3280 3281 first_bd->data.bd_flags.bitfields |= 3282 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3283 first_bd->data.bd_flags.bitfields |= 3284 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3285 3286 nbds_in_hdr = 1; 3287 3288 if (offset == segs->ds_len) { 3289 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3290 segs++; 3291 seg_idx++; 3292 3293 second_bd = (struct eth_tx_2nd_bd *) 3294 ecore_chain_produce(&txq->tx_pbl); 3295 memset(second_bd, 0, sizeof(*second_bd)); 3296 nbd++; 3297 3298 if (seg_idx < nsegs) { 3299 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3300 (segs->ds_addr), (segs->ds_len)); 3301 segs++; 3302 seg_idx++; 3303 } 3304 3305 third_bd = (struct eth_tx_3rd_bd *) 3306 ecore_chain_produce(&txq->tx_pbl); 3307 memset(third_bd, 0, sizeof(*third_bd)); 3308 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3309 third_bd->data.bitfields |= 3310 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3311 nbd++; 3312 3313 if (seg_idx < nsegs) { 3314 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3315 (segs->ds_addr), (segs->ds_len)); 3316 segs++; 3317 seg_idx++; 3318 } 3319 3320 for (; seg_idx < nsegs; seg_idx++) { 3321 tx_data_bd = (struct eth_tx_bd *) 3322 ecore_chain_produce(&txq->tx_pbl); 3323 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3324 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3325 segs->ds_addr,\ 3326 segs->ds_len); 3327 segs++; 3328 nbd++; 3329 } 3330 3331 } else if (offset < segs->ds_len) { 3332 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3333 3334 second_bd = (struct eth_tx_2nd_bd *) 3335 ecore_chain_produce(&txq->tx_pbl); 3336 memset(second_bd, 0, sizeof(*second_bd)); 3337 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3338 (segs->ds_addr + offset),\ 3339 (segs->ds_len - offset)); 3340 nbd++; 3341 segs++; 3342 3343 third_bd = (struct eth_tx_3rd_bd *) 3344 ecore_chain_produce(&txq->tx_pbl); 3345 memset(third_bd, 0, sizeof(*third_bd)); 3346 3347 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3348 segs->ds_addr,\ 3349 segs->ds_len); 3350 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3351 third_bd->data.bitfields |= 3352 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3353 segs++; 3354 nbd++; 3355 3356 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3357 tx_data_bd = (struct eth_tx_bd *) 3358 ecore_chain_produce(&txq->tx_pbl); 3359 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3360 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3361 segs->ds_addr,\ 3362 segs->ds_len); 3363 segs++; 3364 nbd++; 3365 } 3366 3367 } else 
{ 3368 offset = offset - segs->ds_len; 3369 segs++; 3370 3371 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3372 3373 if (offset) 3374 nbds_in_hdr++; 3375 3376 tx_data_bd = (struct eth_tx_bd *) 3377 ecore_chain_produce(&txq->tx_pbl); 3378 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3379 3380 if (second_bd == NULL) { 3381 second_bd = (struct eth_tx_2nd_bd *) 3382 tx_data_bd; 3383 } else if (third_bd == NULL) { 3384 third_bd = (struct eth_tx_3rd_bd *) 3385 tx_data_bd; 3386 } 3387 3388 if (offset && (offset < segs->ds_len)) { 3389 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3390 segs->ds_addr, offset); 3391 3392 tx_data_bd = (struct eth_tx_bd *) 3393 ecore_chain_produce(&txq->tx_pbl); 3394 3395 memset(tx_data_bd, 0, 3396 sizeof(*tx_data_bd)); 3397 3398 if (second_bd == NULL) { 3399 second_bd = 3400 (struct eth_tx_2nd_bd *)tx_data_bd; 3401 } else if (third_bd == NULL) { 3402 third_bd = 3403 (struct eth_tx_3rd_bd *)tx_data_bd; 3404 } 3405 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3406 (segs->ds_addr + offset), \ 3407 (segs->ds_len - offset)); 3408 nbd++; 3409 offset = 0; 3410 } else { 3411 if (offset) 3412 offset = offset - segs->ds_len; 3413 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3414 segs->ds_addr, segs->ds_len); 3415 } 3416 segs++; 3417 nbd++; 3418 } 3419 3420 if (third_bd == NULL) { 3421 third_bd = (struct eth_tx_3rd_bd *) 3422 ecore_chain_produce(&txq->tx_pbl); 3423 memset(third_bd, 0, sizeof(*third_bd)); 3424 } 3425 3426 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3427 third_bd->data.bitfields |= 3428 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3429 } 3430 fp->tx_tso_pkts++; 3431 } else { 3432 segs++; 3433 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3434 tx_data_bd = (struct eth_tx_bd *) 3435 ecore_chain_produce(&txq->tx_pbl); 3436 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3437 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3438 segs->ds_len); 3439 segs++; 3440 nbd++; 3441 } 3442 first_bd->data.bitfields = 3443 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3444 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3445 first_bd->data.bitfields = 3446 htole16(first_bd->data.bitfields); 3447 fp->tx_non_tso_pkts++; 3448 } 3449 3450 3451 first_bd->data.nbds = nbd; 3452 3453 if (ha->dbg_trace_tso_pkt_len) { 3454 if (fp->tx_tso_max_nsegs < nsegs) 3455 fp->tx_tso_max_nsegs = nsegs; 3456 3457 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3458 fp->tx_tso_min_nsegs = nsegs; 3459 } 3460 3461 txq->sw_tx_ring[idx].nsegs = nsegs; 3462 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3463 3464 txq->tx_db.data.bd_prod = 3465 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3466 3467 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3468 3469 QL_DPRINT8(ha, "exit\n"); 3470 return (0); 3471 } 3472 3473 static void 3474 qlnx_stop(qlnx_host_t *ha) 3475 { 3476 struct ifnet *ifp = ha->ifp; 3477 device_t dev; 3478 int i; 3479 3480 dev = ha->pci_dev; 3481 3482 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3483 3484 /* 3485 * We simply lock and unlock each fp->tx_mtx to 3486 * propagate the if_drv_flags 3487 * state to each tx thread 3488 */ 3489 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3490 3491 if (ha->state == QLNX_STATE_OPEN) { 3492 for (i = 0; i < ha->num_rss; i++) { 3493 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3494 3495 mtx_lock(&fp->tx_mtx); 3496 mtx_unlock(&fp->tx_mtx); 3497 3498 if (fp->fp_taskqueue != NULL) 3499 taskqueue_enqueue(fp->fp_taskqueue, 3500 &fp->fp_task); 3501 } 3502 } 3503 3504 qlnx_unload(ha); 3505 3506 return; 3507 } 3508 
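/*
 * Small helpers used by ifnet setup and the ifmedia status path: the
 * transmit queue depth mirrors the TX descriptor ring size, the primary
 * MAC address is read from the first hw-function's hw_info, and
 * qlnx_get_optics() maps the reported media type and link speed to
 * ifmedia subtypes.
 */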
3509 static int 3510 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3511 { 3512 return(TX_RING_SIZE - 1); 3513 } 3514 3515 uint8_t * 3516 qlnx_get_mac_addr(qlnx_host_t *ha) 3517 { 3518 struct ecore_hwfn *p_hwfn; 3519 3520 p_hwfn = &ha->cdev.hwfns[0]; 3521 return (p_hwfn->hw_info.hw_mac_addr); 3522 } 3523 3524 static uint32_t 3525 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3526 { 3527 uint32_t ifm_type = 0; 3528 3529 switch (if_link->media_type) { 3530 3531 case MEDIA_MODULE_FIBER: 3532 case MEDIA_UNSPECIFIED: 3533 if (if_link->speed == (100 * 1000)) 3534 ifm_type = QLNX_IFM_100G_SR4; 3535 else if (if_link->speed == (40 * 1000)) 3536 ifm_type = IFM_40G_SR4; 3537 else if (if_link->speed == (25 * 1000)) 3538 ifm_type = QLNX_IFM_25G_SR; 3539 else if (if_link->speed == (10 * 1000)) 3540 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3541 else if (if_link->speed == (1 * 1000)) 3542 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3543 3544 break; 3545 3546 case MEDIA_DA_TWINAX: 3547 if (if_link->speed == (100 * 1000)) 3548 ifm_type = QLNX_IFM_100G_CR4; 3549 else if (if_link->speed == (40 * 1000)) 3550 ifm_type = IFM_40G_CR4; 3551 else if (if_link->speed == (25 * 1000)) 3552 ifm_type = QLNX_IFM_25G_CR; 3553 else if (if_link->speed == (10 * 1000)) 3554 ifm_type = IFM_10G_TWINAX; 3555 3556 break; 3557 3558 default : 3559 ifm_type = IFM_UNKNOWN; 3560 break; 3561 } 3562 return (ifm_type); 3563 } 3564 3565 3566 3567 /***************************************************************************** 3568 * Interrupt Service Functions 3569 *****************************************************************************/ 3570 3571 static int 3572 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3573 struct mbuf *mp_head, uint16_t len) 3574 { 3575 struct mbuf *mp, *mpf, *mpl; 3576 struct sw_rx_data *sw_rx_data; 3577 struct qlnx_rx_queue *rxq; 3578 uint16_t len_in_buffer; 3579 3580 rxq = fp->rxq; 3581 mpf = mpl = mp = NULL; 3582 3583 while (len) { 3584 3585 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3586 3587 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3588 mp = sw_rx_data->data; 3589 3590 if (mp == NULL) { 3591 QL_DPRINT1(ha, "mp = NULL\n"); 3592 fp->err_rx_mp_null++; 3593 rxq->sw_rx_cons = 3594 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3595 3596 if (mpf != NULL) 3597 m_freem(mpf); 3598 3599 return (-1); 3600 } 3601 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3602 BUS_DMASYNC_POSTREAD); 3603 3604 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3605 3606 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3607 " incoming packet and reusing its buffer\n"); 3608 3609 qlnx_reuse_rx_data(rxq); 3610 fp->err_rx_alloc_errors++; 3611 3612 if (mpf != NULL) 3613 m_freem(mpf); 3614 3615 return (-1); 3616 } 3617 ecore_chain_consume(&rxq->rx_bd_ring); 3618 3619 if (len > rxq->rx_buf_size) 3620 len_in_buffer = rxq->rx_buf_size; 3621 else 3622 len_in_buffer = len; 3623 3624 len = len - len_in_buffer; 3625 3626 mp->m_flags &= ~M_PKTHDR; 3627 mp->m_next = NULL; 3628 mp->m_len = len_in_buffer; 3629 3630 if (mpf == NULL) 3631 mpf = mpl = mp; 3632 else { 3633 mpl->m_next = mp; 3634 mpl = mp; 3635 } 3636 } 3637 3638 if (mpf != NULL) 3639 mp_head->m_next = mpf; 3640 3641 return (0); 3642 } 3643 3644 static void 3645 qlnx_tpa_start(qlnx_host_t *ha, 3646 struct qlnx_fastpath *fp, 3647 struct qlnx_rx_queue *rxq, 3648 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3649 { 3650 uint32_t agg_index; 3651 struct ifnet *ifp = ha->ifp; 3652 struct mbuf *mp; 3653 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 
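	/*
	 * mpf/mpl track the head and tail of the mbuf chain built from the
	 * aggregation's additional buffers (ext_bd_len_list); mpc is the
	 * buffer currently being appended.
	 */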
3654 struct sw_rx_data *sw_rx_data; 3655 dma_addr_t addr; 3656 bus_dmamap_t map; 3657 struct eth_rx_bd *rx_bd; 3658 int i; 3659 device_t dev; 3660 #if __FreeBSD_version >= 1100000 3661 uint8_t hash_type; 3662 #endif /* #if __FreeBSD_version >= 1100000 */ 3663 3664 dev = ha->pci_dev; 3665 agg_index = cqe->tpa_agg_index; 3666 3667 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3668 \t type = 0x%x\n \ 3669 \t bitfields = 0x%x\n \ 3670 \t seg_len = 0x%x\n \ 3671 \t pars_flags = 0x%x\n \ 3672 \t vlan_tag = 0x%x\n \ 3673 \t rss_hash = 0x%x\n \ 3674 \t len_on_first_bd = 0x%x\n \ 3675 \t placement_offset = 0x%x\n \ 3676 \t tpa_agg_index = 0x%x\n \ 3677 \t header_len = 0x%x\n \ 3678 \t ext_bd_len_list[0] = 0x%x\n \ 3679 \t ext_bd_len_list[1] = 0x%x\n \ 3680 \t ext_bd_len_list[2] = 0x%x\n \ 3681 \t ext_bd_len_list[3] = 0x%x\n \ 3682 \t ext_bd_len_list[4] = 0x%x\n", 3683 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3684 cqe->pars_flags.flags, cqe->vlan_tag, 3685 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3686 cqe->tpa_agg_index, cqe->header_len, 3687 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3688 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3689 cqe->ext_bd_len_list[4]); 3690 3691 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3692 fp->err_rx_tpa_invalid_agg_num++; 3693 return; 3694 } 3695 3696 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3697 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3698 mp = sw_rx_data->data; 3699 3700 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 3701 3702 if (mp == NULL) { 3703 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 3704 fp->err_rx_mp_null++; 3705 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3706 3707 return; 3708 } 3709 3710 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3711 3712 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 3713 " flags = %x, dropping incoming packet\n", fp->rss_id, 3714 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 3715 3716 fp->err_rx_hw_errors++; 3717 3718 qlnx_reuse_rx_data(rxq); 3719 3720 QLNX_INC_IERRORS(ifp); 3721 3722 return; 3723 } 3724 3725 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3726 3727 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3728 " dropping incoming packet and reusing its buffer\n", 3729 fp->rss_id); 3730 3731 fp->err_rx_alloc_errors++; 3732 QLNX_INC_IQDROPS(ifp); 3733 3734 /* 3735 * Load the tpa mbuf into the rx ring and save the 3736 * posted mbuf 3737 */ 3738 3739 map = sw_rx_data->map; 3740 addr = sw_rx_data->dma_addr; 3741 3742 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 3743 3744 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 3745 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 3746 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 3747 3748 rxq->tpa_info[agg_index].rx_buf.data = mp; 3749 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 3750 rxq->tpa_info[agg_index].rx_buf.map = map; 3751 3752 rx_bd = (struct eth_rx_bd *) 3753 ecore_chain_produce(&rxq->rx_bd_ring); 3754 3755 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 3756 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 3757 3758 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3759 BUS_DMASYNC_PREREAD); 3760 3761 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 3762 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3763 3764 ecore_chain_consume(&rxq->rx_bd_ring); 3765 3766 /* Now reuse any buffers posted in ext_bd_len_list */ 3767 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) 
{ 3768 3769 if (cqe->ext_bd_len_list[i] == 0) 3770 break; 3771 3772 qlnx_reuse_rx_data(rxq); 3773 } 3774 3775 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3776 return; 3777 } 3778 3779 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3780 3781 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 3782 " dropping incoming packet and reusing its buffer\n", 3783 fp->rss_id); 3784 3785 QLNX_INC_IQDROPS(ifp); 3786 3787 /* if we already have mbuf head in aggregation free it */ 3788 if (rxq->tpa_info[agg_index].mpf) { 3789 m_freem(rxq->tpa_info[agg_index].mpf); 3790 rxq->tpa_info[agg_index].mpl = NULL; 3791 } 3792 rxq->tpa_info[agg_index].mpf = mp; 3793 rxq->tpa_info[agg_index].mpl = NULL; 3794 3795 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3796 ecore_chain_consume(&rxq->rx_bd_ring); 3797 3798 /* Now reuse any buffers posted in ext_bd_len_list */ 3799 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3800 3801 if (cqe->ext_bd_len_list[i] == 0) 3802 break; 3803 3804 qlnx_reuse_rx_data(rxq); 3805 } 3806 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 3807 3808 return; 3809 } 3810 3811 /* 3812 * first process the ext_bd_len_list 3813 * if this fails then we simply drop the packet 3814 */ 3815 ecore_chain_consume(&rxq->rx_bd_ring); 3816 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3817 3818 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 3819 3820 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 3821 3822 if (cqe->ext_bd_len_list[i] == 0) 3823 break; 3824 3825 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3826 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3827 BUS_DMASYNC_POSTREAD); 3828 3829 mpc = sw_rx_data->data; 3830 3831 if (mpc == NULL) { 3832 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 3833 fp->err_rx_mp_null++; 3834 if (mpf != NULL) 3835 m_freem(mpf); 3836 mpf = mpl = NULL; 3837 rxq->tpa_info[agg_index].agg_state = 3838 QLNX_AGG_STATE_ERROR; 3839 ecore_chain_consume(&rxq->rx_bd_ring); 3840 rxq->sw_rx_cons = 3841 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3842 continue; 3843 } 3844 3845 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3846 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3847 " dropping incoming packet and reusing its" 3848 " buffer\n", fp->rss_id); 3849 3850 qlnx_reuse_rx_data(rxq); 3851 3852 if (mpf != NULL) 3853 m_freem(mpf); 3854 mpf = mpl = NULL; 3855 3856 rxq->tpa_info[agg_index].agg_state = 3857 QLNX_AGG_STATE_ERROR; 3858 3859 ecore_chain_consume(&rxq->rx_bd_ring); 3860 rxq->sw_rx_cons = 3861 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3862 3863 continue; 3864 } 3865 3866 mpc->m_flags &= ~M_PKTHDR; 3867 mpc->m_next = NULL; 3868 mpc->m_len = cqe->ext_bd_len_list[i]; 3869 3870 3871 if (mpf == NULL) { 3872 mpf = mpl = mpc; 3873 } else { 3874 mpl->m_len = ha->rx_buf_size; 3875 mpl->m_next = mpc; 3876 mpl = mpc; 3877 } 3878 3879 ecore_chain_consume(&rxq->rx_bd_ring); 3880 rxq->sw_rx_cons = 3881 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3882 } 3883 3884 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 3885 3886 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 3887 " incoming packet and reusing its buffer\n", 3888 fp->rss_id); 3889 3890 QLNX_INC_IQDROPS(ifp); 3891 3892 rxq->tpa_info[agg_index].mpf = mp; 3893 rxq->tpa_info[agg_index].mpl = NULL; 3894 3895 return; 3896 } 3897 3898 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 3899 3900 if (mpf != NULL) { 3901 mp->m_len = ha->rx_buf_size; 3902 mp->m_next = mpf; 3903 rxq->tpa_info[agg_index].mpf = 
mp; 3904 rxq->tpa_info[agg_index].mpl = mpl; 3905 } else { 3906 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 3907 rxq->tpa_info[agg_index].mpf = mp; 3908 rxq->tpa_info[agg_index].mpl = mp; 3909 mp->m_next = NULL; 3910 } 3911 3912 mp->m_flags |= M_PKTHDR; 3913 3914 /* assign packet to this interface interface */ 3915 mp->m_pkthdr.rcvif = ifp; 3916 3917 /* assume no hardware checksum has complated */ 3918 mp->m_pkthdr.csum_flags = 0; 3919 3920 //mp->m_pkthdr.flowid = fp->rss_id; 3921 mp->m_pkthdr.flowid = cqe->rss_hash; 3922 3923 #if __FreeBSD_version >= 1100000 3924 3925 hash_type = cqe->bitfields & 3926 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 3927 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 3928 3929 switch (hash_type) { 3930 3931 case RSS_HASH_TYPE_IPV4: 3932 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 3933 break; 3934 3935 case RSS_HASH_TYPE_TCP_IPV4: 3936 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 3937 break; 3938 3939 case RSS_HASH_TYPE_IPV6: 3940 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 3941 break; 3942 3943 case RSS_HASH_TYPE_TCP_IPV6: 3944 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 3945 break; 3946 3947 default: 3948 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 3949 break; 3950 } 3951 3952 #else 3953 mp->m_flags |= M_FLOWID; 3954 #endif 3955 3956 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 3957 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 3958 3959 mp->m_pkthdr.csum_data = 0xFFFF; 3960 3961 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 3962 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 3963 mp->m_flags |= M_VLANTAG; 3964 } 3965 3966 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 3967 3968 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 3969 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 3970 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 3971 3972 return; 3973 } 3974 3975 static void 3976 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3977 struct qlnx_rx_queue *rxq, 3978 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 3979 { 3980 struct sw_rx_data *sw_rx_data; 3981 int i; 3982 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3983 struct mbuf *mp; 3984 uint32_t agg_index; 3985 device_t dev; 3986 3987 dev = ha->pci_dev; 3988 3989 QL_DPRINT7(ha, "[%d]: enter\n \ 3990 \t type = 0x%x\n \ 3991 \t tpa_agg_index = 0x%x\n \ 3992 \t len_list[0] = 0x%x\n \ 3993 \t len_list[1] = 0x%x\n \ 3994 \t len_list[2] = 0x%x\n \ 3995 \t len_list[3] = 0x%x\n \ 3996 \t len_list[4] = 0x%x\n \ 3997 \t len_list[5] = 0x%x\n", 3998 fp->rss_id, cqe->type, cqe->tpa_agg_index, 3999 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4000 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4001 4002 agg_index = cqe->tpa_agg_index; 4003 4004 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4005 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4006 fp->err_rx_tpa_invalid_agg_num++; 4007 return; 4008 } 4009 4010 4011 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4012 4013 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4014 4015 if (cqe->len_list[i] == 0) 4016 break; 4017 4018 if (rxq->tpa_info[agg_index].agg_state != 4019 QLNX_AGG_STATE_START) { 4020 qlnx_reuse_rx_data(rxq); 4021 continue; 4022 } 4023 4024 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4025 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4026 BUS_DMASYNC_POSTREAD); 4027 4028 mpc = sw_rx_data->data; 4029 4030 if (mpc == NULL) { 4031 4032 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4033 4034 fp->err_rx_mp_null++; 4035 if (mpf != NULL) 4036 m_freem(mpf); 4037 mpf = mpl = 
NULL; 4038 rxq->tpa_info[agg_index].agg_state = 4039 QLNX_AGG_STATE_ERROR; 4040 ecore_chain_consume(&rxq->rx_bd_ring); 4041 rxq->sw_rx_cons = 4042 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4043 continue; 4044 } 4045 4046 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4047 4048 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4049 " dropping incoming packet and reusing its" 4050 " buffer\n", fp->rss_id); 4051 4052 qlnx_reuse_rx_data(rxq); 4053 4054 if (mpf != NULL) 4055 m_freem(mpf); 4056 mpf = mpl = NULL; 4057 4058 rxq->tpa_info[agg_index].agg_state = 4059 QLNX_AGG_STATE_ERROR; 4060 4061 ecore_chain_consume(&rxq->rx_bd_ring); 4062 rxq->sw_rx_cons = 4063 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4064 4065 continue; 4066 } 4067 4068 mpc->m_flags &= ~M_PKTHDR; 4069 mpc->m_next = NULL; 4070 mpc->m_len = cqe->len_list[i]; 4071 4072 4073 if (mpf == NULL) { 4074 mpf = mpl = mpc; 4075 } else { 4076 mpl->m_len = ha->rx_buf_size; 4077 mpl->m_next = mpc; 4078 mpl = mpc; 4079 } 4080 4081 ecore_chain_consume(&rxq->rx_bd_ring); 4082 rxq->sw_rx_cons = 4083 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4084 } 4085 4086 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4087 fp->rss_id, mpf, mpl); 4088 4089 if (mpf != NULL) { 4090 mp = rxq->tpa_info[agg_index].mpl; 4091 mp->m_len = ha->rx_buf_size; 4092 mp->m_next = mpf; 4093 rxq->tpa_info[agg_index].mpl = mpl; 4094 } 4095 4096 return; 4097 } 4098 4099 static int 4100 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4101 struct qlnx_rx_queue *rxq, 4102 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4103 { 4104 struct sw_rx_data *sw_rx_data; 4105 int i; 4106 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4107 struct mbuf *mp; 4108 uint32_t agg_index; 4109 uint32_t len = 0; 4110 struct ifnet *ifp = ha->ifp; 4111 device_t dev; 4112 4113 dev = ha->pci_dev; 4114 4115 QL_DPRINT7(ha, "[%d]: enter\n \ 4116 \t type = 0x%x\n \ 4117 \t tpa_agg_index = 0x%x\n \ 4118 \t total_packet_len = 0x%x\n \ 4119 \t num_of_bds = 0x%x\n \ 4120 \t end_reason = 0x%x\n \ 4121 \t num_of_coalesced_segs = 0x%x\n \ 4122 \t ts_delta = 0x%x\n \ 4123 \t len_list[0] = 0x%x\n \ 4124 \t len_list[1] = 0x%x\n \ 4125 \t len_list[2] = 0x%x\n \ 4126 \t len_list[3] = 0x%x\n", 4127 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4128 cqe->total_packet_len, cqe->num_of_bds, 4129 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4130 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4131 cqe->len_list[3]); 4132 4133 agg_index = cqe->tpa_agg_index; 4134 4135 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4136 4137 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4138 4139 fp->err_rx_tpa_invalid_agg_num++; 4140 return (0); 4141 } 4142 4143 4144 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4145 4146 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4147 4148 if (cqe->len_list[i] == 0) 4149 break; 4150 4151 if (rxq->tpa_info[agg_index].agg_state != 4152 QLNX_AGG_STATE_START) { 4153 4154 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4155 4156 qlnx_reuse_rx_data(rxq); 4157 continue; 4158 } 4159 4160 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4161 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4162 BUS_DMASYNC_POSTREAD); 4163 4164 mpc = sw_rx_data->data; 4165 4166 if (mpc == NULL) { 4167 4168 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4169 4170 fp->err_rx_mp_null++; 4171 if (mpf != NULL) 4172 m_freem(mpf); 4173 mpf = mpl = NULL; 4174 rxq->tpa_info[agg_index].agg_state = 4175 QLNX_AGG_STATE_ERROR; 4176 ecore_chain_consume(&rxq->rx_bd_ring); 4177 rxq->sw_rx_cons = 4178 (rxq->sw_rx_cons + 1) & 
(RX_RING_SIZE - 1); 4179 continue; 4180 } 4181 4182 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4183 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4184 " dropping incoming packet and reusing its" 4185 " buffer\n", fp->rss_id); 4186 4187 qlnx_reuse_rx_data(rxq); 4188 4189 if (mpf != NULL) 4190 m_freem(mpf); 4191 mpf = mpl = NULL; 4192 4193 rxq->tpa_info[agg_index].agg_state = 4194 QLNX_AGG_STATE_ERROR; 4195 4196 ecore_chain_consume(&rxq->rx_bd_ring); 4197 rxq->sw_rx_cons = 4198 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4199 4200 continue; 4201 } 4202 4203 mpc->m_flags &= ~M_PKTHDR; 4204 mpc->m_next = NULL; 4205 mpc->m_len = cqe->len_list[i]; 4206 4207 4208 if (mpf == NULL) { 4209 mpf = mpl = mpc; 4210 } else { 4211 mpl->m_len = ha->rx_buf_size; 4212 mpl->m_next = mpc; 4213 mpl = mpc; 4214 } 4215 4216 ecore_chain_consume(&rxq->rx_bd_ring); 4217 rxq->sw_rx_cons = 4218 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4219 } 4220 4221 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4222 4223 if (mpf != NULL) { 4224 4225 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4226 4227 mp = rxq->tpa_info[agg_index].mpl; 4228 mp->m_len = ha->rx_buf_size; 4229 mp->m_next = mpf; 4230 } 4231 4232 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4233 4234 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4235 4236 if (rxq->tpa_info[agg_index].mpf != NULL) 4237 m_freem(rxq->tpa_info[agg_index].mpf); 4238 rxq->tpa_info[agg_index].mpf = NULL; 4239 rxq->tpa_info[agg_index].mpl = NULL; 4240 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4241 return (0); 4242 } 4243 4244 mp = rxq->tpa_info[agg_index].mpf; 4245 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4246 mp->m_pkthdr.len = cqe->total_packet_len; 4247 4248 if (mp->m_next == NULL) 4249 mp->m_len = mp->m_pkthdr.len; 4250 else { 4251 /* compute the total packet length */ 4252 mpf = mp; 4253 while (mpf != NULL) { 4254 len += mpf->m_len; 4255 mpf = mpf->m_next; 4256 } 4257 4258 if (cqe->total_packet_len > len) { 4259 mpl = rxq->tpa_info[agg_index].mpl; 4260 mpl->m_len += (cqe->total_packet_len - len); 4261 } 4262 } 4263 4264 QLNX_INC_IPACKETS(ifp); 4265 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4266 4267 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%lx\n \ 4268 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4269 fp->rss_id, mp->m_pkthdr.csum_data, 4270 mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4271 4272 (*ifp->if_input)(ifp, mp); 4273 4274 rxq->tpa_info[agg_index].mpf = NULL; 4275 rxq->tpa_info[agg_index].mpl = NULL; 4276 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4277 4278 return (cqe->num_of_coalesced_segs); 4279 } 4280 4281 static int 4282 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4283 int lro_enable) 4284 { 4285 uint16_t hw_comp_cons, sw_comp_cons; 4286 int rx_pkt = 0; 4287 struct qlnx_rx_queue *rxq = fp->rxq; 4288 struct ifnet *ifp = ha->ifp; 4289 struct ecore_dev *cdev = &ha->cdev; 4290 struct ecore_hwfn *p_hwfn; 4291 4292 #ifdef QLNX_SOFT_LRO 4293 struct lro_ctrl *lro; 4294 4295 lro = &rxq->lro; 4296 #endif /* #ifdef QLNX_SOFT_LRO */ 4297 4298 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4299 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4300 4301 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4302 4303 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4304 * / BD in the while-loop before reading hw_comp_cons. 
If the CQE is
	 * read before it is written by FW, then FW writes CQE and SB, and then
	 * the CPU reads the hw_comp_cons, it will use an old CQE.
	 */

	/* Loop to complete all indicated BDs */
	while (sw_comp_cons != hw_comp_cons) {
		union eth_rx_cqe		*cqe;
		struct eth_fast_path_rx_reg_cqe	*fp_cqe;
		struct sw_rx_data		*sw_rx_data;
		register struct mbuf		*mp;
		enum eth_rx_cqe_type		cqe_type;
		uint16_t			len, pad, len_on_first_bd;
		uint8_t				*data;
#if __FreeBSD_version >= 1100000
		uint8_t				hash_type;
#endif /* #if __FreeBSD_version >= 1100000 */

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)
			ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
			QL_DPRINT3(ha, "Got a slowpath CQE\n");

			ecore_eth_cqe_completion(p_hwfn,
				(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {

			switch (cqe_type) {

			case ETH_RX_CQE_TYPE_TPA_START:
				qlnx_tpa_start(ha, fp, rxq,
					&cqe->fast_path_tpa_start);
				fp->tpa_start++;
				break;

			case ETH_RX_CQE_TYPE_TPA_CONT:
				qlnx_tpa_cont(ha, fp, rxq,
					&cqe->fast_path_tpa_cont);
				fp->tpa_cont++;
				break;

			case ETH_RX_CQE_TYPE_TPA_END:
				rx_pkt += qlnx_tpa_end(ha, fp, rxq,
						&cqe->fast_path_tpa_end);
				fp->tpa_end++;
				break;

			default:
				break;
			}

			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
		mp = sw_rx_data->data;

		if (mp == NULL) {
			QL_DPRINT1(ha, "mp = NULL\n");
			fp->err_rx_mp_null++;
			rxq->sw_rx_cons =
				(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
			goto next_cqe;
		}
		bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
			BUS_DMASYNC_POSTREAD);

		/* non GRO */
		fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
		len = le16toh(fp_cqe->pkt_len);
		pad = fp_cqe->placement_offset;

		QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
			" len %u, parsing flags = %d pad = %d\n",
			cqe_type, fp_cqe->bitfields,
			le16toh(fp_cqe->vlan_tag),
			len, le16toh(fp_cqe->pars_flags.flags), pad);

		data = mtod(mp, uint8_t *);
		data = data + pad;

		if (0)
			qlnx_dump_buf8(ha, __func__, data, len);

		/* For every Rx BD consumed, we allocate a new BD so the BD
		 * ring always keeps a fixed size. If allocation fails, we
		 * take the consumed BD and return it to the ring in the PROD
		 * position. The packet that was received on that BD will be
		 * dropped (and not passed to the upper stack).
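		 *
		 * Concretely, qlnx_alloc_rx_buffer() posts a fresh mbuf at
		 * the producer index, while qlnx_reuse_rx_data() recycles the
		 * consumed buffer back into the producer slot when that
		 * allocation fails, so the number of BDs posted to the
		 * hardware never shrinks.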
4400 */ 4401 /* If this is an error packet then drop it */ 4402 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4403 CQE_FLAGS_ERR) { 4404 4405 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4406 " dropping incoming packet\n", sw_comp_cons, 4407 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4408 fp->err_rx_hw_errors++; 4409 4410 qlnx_reuse_rx_data(rxq); 4411 4412 QLNX_INC_IERRORS(ifp); 4413 4414 goto next_cqe; 4415 } 4416 4417 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4418 4419 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4420 " incoming packet and reusing its buffer\n"); 4421 qlnx_reuse_rx_data(rxq); 4422 4423 fp->err_rx_alloc_errors++; 4424 4425 QLNX_INC_IQDROPS(ifp); 4426 4427 goto next_cqe; 4428 } 4429 4430 ecore_chain_consume(&rxq->rx_bd_ring); 4431 4432 len_on_first_bd = fp_cqe->len_on_first_bd; 4433 m_adj(mp, pad); 4434 mp->m_pkthdr.len = len; 4435 4436 QL_DPRINT1(ha, "len = %d len_on_first_bd = %d\n", 4437 len, len_on_first_bd); 4438 if ((len > 60 ) && (len > len_on_first_bd)) { 4439 4440 mp->m_len = len_on_first_bd; 4441 4442 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4443 (len - len_on_first_bd)) != 0) { 4444 4445 m_freem(mp); 4446 4447 QLNX_INC_IQDROPS(ifp); 4448 4449 goto next_cqe; 4450 } 4451 4452 } else if (len_on_first_bd < len) { 4453 fp->err_rx_jumbo_chain_pkts++; 4454 } else { 4455 mp->m_len = len; 4456 } 4457 4458 mp->m_flags |= M_PKTHDR; 4459 4460 /* assign packet to this interface interface */ 4461 mp->m_pkthdr.rcvif = ifp; 4462 4463 /* assume no hardware checksum has complated */ 4464 mp->m_pkthdr.csum_flags = 0; 4465 4466 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4467 4468 #if __FreeBSD_version >= 1100000 4469 4470 hash_type = fp_cqe->bitfields & 4471 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4472 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4473 4474 switch (hash_type) { 4475 4476 case RSS_HASH_TYPE_IPV4: 4477 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4478 break; 4479 4480 case RSS_HASH_TYPE_TCP_IPV4: 4481 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4482 break; 4483 4484 case RSS_HASH_TYPE_IPV6: 4485 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4486 break; 4487 4488 case RSS_HASH_TYPE_TCP_IPV6: 4489 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4490 break; 4491 4492 default: 4493 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4494 break; 4495 } 4496 4497 #else 4498 mp->m_flags |= M_FLOWID; 4499 #endif 4500 4501 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4502 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4503 } 4504 4505 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4506 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4507 } 4508 4509 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4510 mp->m_pkthdr.csum_data = 0xFFFF; 4511 mp->m_pkthdr.csum_flags |= 4512 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4513 } 4514 4515 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4516 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4517 mp->m_flags |= M_VLANTAG; 4518 } 4519 4520 QLNX_INC_IPACKETS(ifp); 4521 QLNX_INC_IBYTES(ifp, len); 4522 4523 #ifdef QLNX_SOFT_LRO 4524 4525 if (lro_enable) { 4526 4527 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4528 4529 tcp_lro_queue_mbuf(lro, mp); 4530 4531 #else 4532 4533 if (tcp_lro_rx(lro, mp, 0)) 4534 (*ifp->if_input)(ifp, mp); 4535 4536 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4537 4538 } else { 4539 (*ifp->if_input)(ifp, mp); 4540 } 4541 #else 4542 4543 (*ifp->if_input)(ifp, mp); 4544 4545 #endif /* #ifdef QLNX_SOFT_LRO */ 4546 4547 rx_pkt++; 4548 4549 rxq->sw_rx_cons = 
(rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4550 4551 next_cqe: /* don't consume bd rx buffer */ 4552 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4553 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4554 4555 /* CR TPA - revisit how to handle budget in TPA perhaps 4556 increase on "end" */ 4557 if (rx_pkt == budget) 4558 break; 4559 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4560 4561 /* Update producers */ 4562 qlnx_update_rx_prod(p_hwfn, rxq); 4563 4564 return rx_pkt; 4565 } 4566 4567 4568 /* 4569 * fast path interrupt 4570 */ 4571 4572 static void 4573 qlnx_fp_isr(void *arg) 4574 { 4575 qlnx_ivec_t *ivec = arg; 4576 qlnx_host_t *ha; 4577 struct qlnx_fastpath *fp = NULL; 4578 int idx; 4579 4580 ha = ivec->ha; 4581 4582 if (ha->state != QLNX_STATE_OPEN) { 4583 return; 4584 } 4585 4586 idx = ivec->rss_idx; 4587 4588 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4589 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4590 ha->err_illegal_intr++; 4591 return; 4592 } 4593 fp = &ha->fp_array[idx]; 4594 4595 if (fp == NULL) { 4596 ha->err_fp_null++; 4597 } else { 4598 4599 #ifdef QLNX_RCV_IN_TASKQ 4600 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4601 if (fp->fp_taskqueue != NULL) 4602 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 4603 #else 4604 int rx_int = 0, total_rx_count = 0; 4605 int lro_enable, tc; 4606 struct qlnx_tx_queue *txq; 4607 uint16_t elem_left; 4608 4609 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 4610 4611 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4612 4613 do { 4614 for (tc = 0; tc < ha->num_tc; tc++) { 4615 4616 txq = fp->txq[tc]; 4617 4618 if((int)(elem_left = 4619 ecore_chain_get_elem_left(&txq->tx_pbl)) < 4620 QLNX_TX_ELEM_THRESH) { 4621 4622 if (mtx_trylock(&fp->tx_mtx)) { 4623 #ifdef QLNX_TRACE_PERF_DATA 4624 tx_compl = fp->tx_pkts_completed; 4625 #endif 4626 4627 qlnx_tx_int(ha, fp, fp->txq[tc]); 4628 #ifdef QLNX_TRACE_PERF_DATA 4629 fp->tx_pkts_compl_intr += 4630 (fp->tx_pkts_completed - tx_compl); 4631 if ((fp->tx_pkts_completed - tx_compl) <= 32) 4632 fp->tx_comInt[0]++; 4633 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 4634 ((fp->tx_pkts_completed - tx_compl) <= 64)) 4635 fp->tx_comInt[1]++; 4636 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 4637 ((fp->tx_pkts_completed - tx_compl) <= 128)) 4638 fp->tx_comInt[2]++; 4639 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 4640 fp->tx_comInt[3]++; 4641 #endif 4642 mtx_unlock(&fp->tx_mtx); 4643 } 4644 } 4645 } 4646 4647 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4648 lro_enable); 4649 4650 if (rx_int) { 4651 fp->rx_pkts += rx_int; 4652 total_rx_count += rx_int; 4653 } 4654 4655 } while (rx_int); 4656 4657 #ifdef QLNX_SOFT_LRO 4658 { 4659 struct lro_ctrl *lro; 4660 4661 lro = &fp->rxq->lro; 4662 4663 if (lro_enable && total_rx_count) { 4664 4665 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4666 4667 #ifdef QLNX_TRACE_LRO_CNT 4668 if (lro->lro_mbuf_count & ~1023) 4669 fp->lro_cnt_1024++; 4670 else if (lro->lro_mbuf_count & ~511) 4671 fp->lro_cnt_512++; 4672 else if (lro->lro_mbuf_count & ~255) 4673 fp->lro_cnt_256++; 4674 else if (lro->lro_mbuf_count & ~127) 4675 fp->lro_cnt_128++; 4676 else if (lro->lro_mbuf_count & ~63) 4677 fp->lro_cnt_64++; 4678 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4679 4680 tcp_lro_flush_all(lro); 4681 4682 #else 4683 struct lro_entry *queued; 4684 4685 while ((!SLIST_EMPTY(&lro->lro_active))) { 4686 queued = SLIST_FIRST(&lro->lro_active); 4687 SLIST_REMOVE_HEAD(&lro->lro_active, \ 4688 next); 4689 
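				/* Older stacks lack tcp_lro_flush_all();
				 * flush each completed aggregation up the
				 * stack one entry at a time. */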
tcp_lro_flush(lro, queued); 4690 } 4691 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4692 } 4693 } 4694 #endif /* #ifdef QLNX_SOFT_LRO */ 4695 4696 ecore_sb_update_sb_idx(fp->sb_info); 4697 rmb(); 4698 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4699 4700 #endif /* #ifdef QLNX_RCV_IN_TASKQ */ 4701 } 4702 4703 return; 4704 } 4705 4706 4707 /* 4708 * slow path interrupt processing function 4709 * can be invoked in polled mode or in interrupt mode via taskqueue. 4710 */ 4711 void 4712 qlnx_sp_isr(void *arg) 4713 { 4714 struct ecore_hwfn *p_hwfn; 4715 qlnx_host_t *ha; 4716 4717 p_hwfn = arg; 4718 4719 ha = (qlnx_host_t *)p_hwfn->p_dev; 4720 4721 ha->sp_interrupts++; 4722 4723 QL_DPRINT2(ha, "enter\n"); 4724 4725 ecore_int_sp_dpc(p_hwfn); 4726 4727 QL_DPRINT2(ha, "exit\n"); 4728 4729 return; 4730 } 4731 4732 /***************************************************************************** 4733 * Support Functions for DMA'able Memory 4734 *****************************************************************************/ 4735 4736 static void 4737 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4738 { 4739 *((bus_addr_t *)arg) = 0; 4740 4741 if (error) { 4742 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4743 return; 4744 } 4745 4746 *((bus_addr_t *)arg) = segs[0].ds_addr; 4747 4748 return; 4749 } 4750 4751 static int 4752 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4753 { 4754 int ret = 0; 4755 device_t dev; 4756 bus_addr_t b_addr; 4757 4758 dev = ha->pci_dev; 4759 4760 ret = bus_dma_tag_create( 4761 ha->parent_tag,/* parent */ 4762 dma_buf->alignment, 4763 ((bus_size_t)(1ULL << 32)),/* boundary */ 4764 BUS_SPACE_MAXADDR, /* lowaddr */ 4765 BUS_SPACE_MAXADDR, /* highaddr */ 4766 NULL, NULL, /* filter, filterarg */ 4767 dma_buf->size, /* maxsize */ 4768 1, /* nsegments */ 4769 dma_buf->size, /* maxsegsize */ 4770 0, /* flags */ 4771 NULL, NULL, /* lockfunc, lockarg */ 4772 &dma_buf->dma_tag); 4773 4774 if (ret) { 4775 QL_DPRINT1(ha, "could not create dma tag\n"); 4776 goto qlnx_alloc_dmabuf_exit; 4777 } 4778 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4779 (void **)&dma_buf->dma_b, 4780 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4781 &dma_buf->dma_map); 4782 if (ret) { 4783 bus_dma_tag_destroy(dma_buf->dma_tag); 4784 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 4785 goto qlnx_alloc_dmabuf_exit; 4786 } 4787 4788 ret = bus_dmamap_load(dma_buf->dma_tag, 4789 dma_buf->dma_map, 4790 dma_buf->dma_b, 4791 dma_buf->size, 4792 qlnx_dmamap_callback, 4793 &b_addr, BUS_DMA_NOWAIT); 4794 4795 if (ret || !b_addr) { 4796 bus_dma_tag_destroy(dma_buf->dma_tag); 4797 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4798 dma_buf->dma_map); 4799 ret = -1; 4800 goto qlnx_alloc_dmabuf_exit; 4801 } 4802 4803 dma_buf->dma_addr = b_addr; 4804 4805 qlnx_alloc_dmabuf_exit: 4806 4807 return ret; 4808 } 4809 4810 static void 4811 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4812 { 4813 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 4814 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 4815 bus_dma_tag_destroy(dma_buf->dma_tag); 4816 return; 4817 } 4818 4819 void * 4820 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 4821 { 4822 qlnx_dma_t dma_buf; 4823 qlnx_dma_t *dma_p; 4824 qlnx_host_t *ha; 4825 device_t dev; 4826 4827 ha = (qlnx_host_t *)ecore_dev; 4828 dev = ha->pci_dev; 4829 4830 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4831 4832 memset(&dma_buf, 0, sizeof 
(qlnx_dma_t)); 4833 4834 dma_buf.size = size + PAGE_SIZE; 4835 dma_buf.alignment = 8; 4836 4837 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 4838 return (NULL); 4839 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 4840 4841 *phys = dma_buf.dma_addr; 4842 4843 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 4844 4845 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 4846 /* 4847 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 4848 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 4849 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 4850 */ 4851 return (dma_buf.dma_b); 4852 } 4853 4854 void 4855 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 4856 uint32_t size) 4857 { 4858 qlnx_dma_t dma_buf, *dma_p; 4859 qlnx_host_t *ha; 4860 device_t dev; 4861 4862 ha = (qlnx_host_t *)ecore_dev; 4863 dev = ha->pci_dev; 4864 4865 if (v_addr == NULL) 4866 return; 4867 4868 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4869 4870 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 4871 /* 4872 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 4873 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 4874 dma_p->dma_b, (void *)dma_p->dma_addr, size); 4875 */ 4876 dma_buf = *dma_p; 4877 4878 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 4879 return; 4880 } 4881 4882 static int 4883 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 4884 { 4885 int ret; 4886 device_t dev; 4887 4888 dev = ha->pci_dev; 4889 4890 /* 4891 * Allocate parent DMA Tag 4892 */ 4893 ret = bus_dma_tag_create( 4894 bus_get_dma_tag(dev), /* parent */ 4895 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 4896 BUS_SPACE_MAXADDR, /* lowaddr */ 4897 BUS_SPACE_MAXADDR, /* highaddr */ 4898 NULL, NULL, /* filter, filterarg */ 4899 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 4900 0, /* nsegments */ 4901 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 4902 0, /* flags */ 4903 NULL, NULL, /* lockfunc, lockarg */ 4904 &ha->parent_tag); 4905 4906 if (ret) { 4907 QL_DPRINT1(ha, "could not create parent dma tag\n"); 4908 return (-1); 4909 } 4910 4911 ha->flags.parent_tag = 1; 4912 4913 return (0); 4914 } 4915 4916 static void 4917 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 4918 { 4919 if (ha->parent_tag != NULL) { 4920 bus_dma_tag_destroy(ha->parent_tag); 4921 ha->parent_tag = NULL; 4922 } 4923 return; 4924 } 4925 4926 static int 4927 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 4928 { 4929 if (bus_dma_tag_create(NULL, /* parent */ 4930 1, 0, /* alignment, bounds */ 4931 BUS_SPACE_MAXADDR, /* lowaddr */ 4932 BUS_SPACE_MAXADDR, /* highaddr */ 4933 NULL, NULL, /* filter, filterarg */ 4934 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 4935 QLNX_MAX_SEGMENTS, /* nsegments */ 4936 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 4937 0, /* flags */ 4938 NULL, /* lockfunc */ 4939 NULL, /* lockfuncarg */ 4940 &ha->tx_tag)) { 4941 4942 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 4943 return (-1); 4944 } 4945 4946 return (0); 4947 } 4948 4949 static void 4950 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 4951 { 4952 if (ha->tx_tag != NULL) { 4953 bus_dma_tag_destroy(ha->tx_tag); 4954 ha->tx_tag = NULL; 4955 } 4956 return; 4957 } 4958 4959 static int 4960 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 4961 { 4962 if (bus_dma_tag_create(NULL, /* parent */ 4963 1, 0, /* alignment, bounds */ 4964 BUS_SPACE_MAXADDR, /* lowaddr */ 4965 BUS_SPACE_MAXADDR, /* highaddr */ 4966 NULL, NULL, /* filter, filterarg */ 4967 MJUM9BYTES, /* maxsize */ 4968 1, /* nsegments */ 4969 MJUM9BYTES, /* maxsegsize */ 4970 0, /* flags */ 4971 NULL, /* lockfunc */ 4972 NULL, /* lockfuncarg */ 4973 &ha->rx_tag)) 
{ 4974 4975 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 4976 4977 return (-1); 4978 } 4979 return (0); 4980 } 4981 4982 static void 4983 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 4984 { 4985 if (ha->rx_tag != NULL) { 4986 bus_dma_tag_destroy(ha->rx_tag); 4987 ha->rx_tag = NULL; 4988 } 4989 return; 4990 } 4991 4992 /********************************* 4993 * Exported functions 4994 *********************************/ 4995 uint32_t 4996 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 4997 { 4998 uint32_t bar_size; 4999 5000 bar_id = bar_id * 2; 5001 5002 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5003 SYS_RES_MEMORY, 5004 PCIR_BAR(bar_id)); 5005 5006 return (bar_size); 5007 } 5008 5009 uint32_t 5010 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5011 { 5012 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5013 pci_reg, 1); 5014 return 0; 5015 } 5016 5017 uint32_t 5018 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5019 uint16_t *reg_value) 5020 { 5021 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5022 pci_reg, 2); 5023 return 0; 5024 } 5025 5026 uint32_t 5027 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5028 uint32_t *reg_value) 5029 { 5030 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5031 pci_reg, 4); 5032 return 0; 5033 } 5034 5035 void 5036 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5037 { 5038 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5039 pci_reg, reg_value, 1); 5040 return; 5041 } 5042 5043 void 5044 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5045 uint16_t reg_value) 5046 { 5047 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5048 pci_reg, reg_value, 2); 5049 return; 5050 } 5051 5052 void 5053 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5054 uint32_t reg_value) 5055 { 5056 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5057 pci_reg, reg_value, 4); 5058 return; 5059 } 5060 5061 5062 int 5063 qlnx_pci_find_capability(void *ecore_dev, int cap) 5064 { 5065 int reg; 5066 qlnx_host_t *ha; 5067 5068 ha = ecore_dev; 5069 5070 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0) 5071 return reg; 5072 else { 5073 QL_DPRINT1(ha, "failed\n"); 5074 return 0; 5075 } 5076 } 5077 5078 uint32_t 5079 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5080 { 5081 uint32_t data32; 5082 struct ecore_dev *cdev; 5083 struct ecore_hwfn *p_hwfn; 5084 5085 p_hwfn = hwfn; 5086 5087 cdev = p_hwfn->p_dev; 5088 5089 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 5090 (uint8_t *)(cdev->regview)) + reg_addr; 5091 5092 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr); 5093 5094 return (data32); 5095 } 5096 5097 void 5098 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5099 { 5100 struct ecore_dev *cdev; 5101 struct ecore_hwfn *p_hwfn; 5102 5103 p_hwfn = hwfn; 5104 5105 cdev = p_hwfn->p_dev; 5106 5107 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 5108 (uint8_t *)(cdev->regview)) + reg_addr; 5109 5110 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, reg_addr, value); 5111 5112 return; 5113 } 5114 5115 void 5116 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5117 { 5118 struct ecore_dev *cdev; 5119 struct ecore_hwfn *p_hwfn; 5120 5121 p_hwfn = hwfn; 5122 5123 cdev = p_hwfn->p_dev; 5124 5125 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->regview) - 5126 (uint8_t *)(cdev->regview)) + reg_addr; 5127 5128 bus_write_2(((qlnx_host_t 
*)cdev)->pci_reg, reg_addr, value); 5129 5130 return; 5131 } 5132 5133 void 5134 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5135 { 5136 struct ecore_dev *cdev; 5137 struct ecore_hwfn *p_hwfn; 5138 5139 p_hwfn = hwfn; 5140 5141 cdev = p_hwfn->p_dev; 5142 5143 reg_addr = (uint32_t)((uint8_t *)(p_hwfn->doorbells) - 5144 (uint8_t *)(cdev->doorbells)) + reg_addr; 5145 5146 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, reg_addr, value); 5147 5148 return; 5149 } 5150 5151 uint32_t 5152 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5153 { 5154 uint32_t data32; 5155 uint32_t offset; 5156 struct ecore_dev *cdev; 5157 5158 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5159 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5160 5161 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5162 5163 return (data32); 5164 } 5165 5166 void 5167 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5168 { 5169 uint32_t offset; 5170 struct ecore_dev *cdev; 5171 5172 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5173 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5174 5175 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5176 5177 return; 5178 } 5179 5180 void 5181 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5182 { 5183 uint32_t offset; 5184 struct ecore_dev *cdev; 5185 5186 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5187 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5188 5189 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5190 return; 5191 } 5192 5193 void * 5194 qlnx_zalloc(uint32_t size) 5195 { 5196 caddr_t va; 5197 5198 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5199 bzero(va, size); 5200 return ((void *)va); 5201 } 5202 5203 void 5204 qlnx_barrier(void *p_hwfn) 5205 { 5206 qlnx_host_t *ha; 5207 5208 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5209 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5210 } 5211 5212 void 5213 qlnx_link_update(void *p_hwfn) 5214 { 5215 qlnx_host_t *ha; 5216 int prev_link_state; 5217 5218 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5219 5220 qlnx_fill_link(p_hwfn, &ha->if_link); 5221 5222 prev_link_state = ha->link_up; 5223 ha->link_up = ha->if_link.link_up; 5224 5225 if (prev_link_state != ha->link_up) { 5226 if (ha->link_up) { 5227 if_link_state_change(ha->ifp, LINK_STATE_UP); 5228 } else { 5229 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5230 } 5231 } 5232 return; 5233 } 5234 5235 void 5236 qlnx_fill_link(struct ecore_hwfn *hwfn, struct qlnx_link_output *if_link) 5237 { 5238 struct ecore_mcp_link_params link_params; 5239 struct ecore_mcp_link_state link_state; 5240 5241 memset(if_link, 0, sizeof(*if_link)); 5242 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5243 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5244 5245 /* Prepare source inputs */ 5246 /* we only deal with physical functions */ 5247 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5248 sizeof(link_params)); 5249 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5250 sizeof(link_state)); 5251 5252 ecore_mcp_get_media_type(hwfn->p_dev, &if_link->media_type); 5253 5254 /* Set the link parameters to pass to protocol driver */ 5255 if (link_state.link_up) { 5256 if_link->link_up = true; 5257 if_link->speed = link_state.speed; 5258 } 5259 5260 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5261 5262 if (link_params.speed.autoneg) 5263 if_link->supported_caps |= 
QLNX_LINK_CAP_Autoneg; 5264 5265 if (link_params.pause.autoneg || 5266 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5267 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5268 5269 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5270 link_params.pause.forced_tx) 5271 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5272 5273 if (link_params.speed.advertised_speeds & 5274 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5275 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5276 QLNX_LINK_CAP_1000baseT_Full; 5277 5278 if (link_params.speed.advertised_speeds & 5279 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5280 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5281 5282 if (link_params.speed.advertised_speeds & 5283 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5284 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5285 5286 if (link_params.speed.advertised_speeds & 5287 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5288 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5289 5290 if (link_params.speed.advertised_speeds & 5291 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5292 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5293 5294 if (link_params.speed.advertised_speeds & 5295 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5296 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5297 5298 if_link->advertised_caps = if_link->supported_caps; 5299 5300 if_link->autoneg = link_params.speed.autoneg; 5301 if_link->duplex = QLNX_LINK_DUPLEX; 5302 5303 /* Link partner capabilities */ 5304 5305 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5306 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5307 5308 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5309 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5310 5311 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5312 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5313 5314 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5315 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5316 5317 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5318 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5319 5320 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5321 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5322 5323 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5324 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5325 5326 if (link_state.an_complete) 5327 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5328 5329 if (link_state.partner_adv_pause) 5330 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5331 5332 if ((link_state.partner_adv_pause == 5333 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5334 (link_state.partner_adv_pause == 5335 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5336 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5337 5338 return; 5339 } 5340 5341 static int 5342 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5343 { 5344 int rc, i; 5345 5346 for (i = 0; i < cdev->num_hwfns; i++) { 5347 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5348 p_hwfn->pf_params = *func_params; 5349 } 5350 5351 rc = ecore_resc_alloc(cdev); 5352 if (rc) 5353 goto qlnx_nic_setup_exit; 5354 5355 ecore_resc_setup(cdev); 5356 5357 qlnx_nic_setup_exit: 5358 5359 return rc; 5360 } 5361 5362 static int 5363 qlnx_nic_start(struct ecore_dev *cdev) 5364 { 5365 int 
rc; 5366 struct ecore_hw_init_params params; 5367 5368 bzero(¶ms, sizeof (struct ecore_hw_init_params)); 5369 5370 params.p_tunn = NULL; 5371 params.b_hw_start = true; 5372 params.int_mode = cdev->int_mode; 5373 params.allow_npar_tx_switch = true; 5374 params.bin_fw_data = NULL; 5375 5376 rc = ecore_hw_init(cdev, ¶ms); 5377 if (rc) { 5378 ecore_resc_free(cdev); 5379 return rc; 5380 } 5381 5382 return 0; 5383 } 5384 5385 static int 5386 qlnx_slowpath_start(qlnx_host_t *ha) 5387 { 5388 struct ecore_dev *cdev; 5389 struct ecore_pf_params pf_params; 5390 int rc; 5391 5392 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5393 pf_params.eth_pf_params.num_cons = 5394 (ha->num_rss) * (ha->num_tc + 1); 5395 5396 cdev = &ha->cdev; 5397 5398 rc = qlnx_nic_setup(cdev, &pf_params); 5399 if (rc) 5400 goto qlnx_slowpath_start_exit; 5401 5402 cdev->int_mode = ECORE_INT_MODE_MSIX; 5403 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5404 5405 #ifdef QLNX_MAX_COALESCE 5406 cdev->rx_coalesce_usecs = 255; 5407 cdev->tx_coalesce_usecs = 255; 5408 #endif 5409 5410 rc = qlnx_nic_start(cdev); 5411 5412 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5413 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5414 5415 qlnx_slowpath_start_exit: 5416 5417 return (rc); 5418 } 5419 5420 static int 5421 qlnx_slowpath_stop(qlnx_host_t *ha) 5422 { 5423 struct ecore_dev *cdev; 5424 device_t dev = ha->pci_dev; 5425 int i; 5426 5427 cdev = &ha->cdev; 5428 5429 ecore_hw_stop(cdev); 5430 5431 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5432 5433 if (ha->sp_handle[i]) 5434 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5435 ha->sp_handle[i]); 5436 5437 ha->sp_handle[i] = NULL; 5438 5439 if (ha->sp_irq[i]) 5440 (void) bus_release_resource(dev, SYS_RES_IRQ, 5441 ha->sp_irq_rid[i], ha->sp_irq[i]); 5442 ha->sp_irq[i] = NULL; 5443 } 5444 5445 ecore_resc_free(cdev); 5446 5447 return 0; 5448 } 5449 5450 static void 5451 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5452 char ver_str[VER_SIZE]) 5453 { 5454 int i; 5455 5456 memcpy(cdev->name, name, NAME_SIZE); 5457 5458 for_each_hwfn(cdev, i) { 5459 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5460 } 5461 5462 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5463 5464 return ; 5465 } 5466 5467 void 5468 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5469 { 5470 enum ecore_mcp_protocol_type type; 5471 union ecore_mcp_protocol_stats *stats; 5472 struct ecore_eth_stats eth_stats; 5473 qlnx_host_t *ha; 5474 5475 ha = cdev; 5476 stats = proto_stats; 5477 type = proto_type; 5478 5479 switch (type) { 5480 5481 case ECORE_MCP_LAN_STATS: 5482 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats); 5483 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5484 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5485 stats->lan_stats.fcs_err = -1; 5486 break; 5487 5488 default: 5489 ha->err_get_proto_invalid_type++; 5490 5491 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5492 break; 5493 } 5494 return; 5495 } 5496 5497 static int 5498 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5499 { 5500 struct ecore_hwfn *p_hwfn; 5501 struct ecore_ptt *p_ptt; 5502 5503 p_hwfn = &ha->cdev.hwfns[0]; 5504 p_ptt = ecore_ptt_acquire(p_hwfn); 5505 5506 if (p_ptt == NULL) { 5507 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5508 return (-1); 5509 } 5510 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5511 5512 ecore_ptt_release(p_hwfn, p_ptt); 5513 5514 return (0); 5515 } 5516 5517 static int 5518 
qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5519 { 5520 struct ecore_hwfn *p_hwfn; 5521 struct ecore_ptt *p_ptt; 5522 5523 p_hwfn = &ha->cdev.hwfns[0]; 5524 p_ptt = ecore_ptt_acquire(p_hwfn); 5525 5526 if (p_ptt == NULL) { 5527 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5528 return (-1); 5529 } 5530 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5531 5532 ecore_ptt_release(p_hwfn, p_ptt); 5533 5534 return (0); 5535 } 5536 5537 static int 5538 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5539 { 5540 struct ecore_dev *cdev; 5541 5542 cdev = &ha->cdev; 5543 5544 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5545 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5546 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5547 5548 return 0; 5549 } 5550 5551 static void 5552 qlnx_init_fp(qlnx_host_t *ha) 5553 { 5554 int rss_id, txq_array_index, tc; 5555 5556 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5557 5558 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5559 5560 fp->rss_id = rss_id; 5561 fp->edev = ha; 5562 fp->sb_info = &ha->sb_array[rss_id]; 5563 fp->rxq = &ha->rxq_array[rss_id]; 5564 fp->rxq->rxq_id = rss_id; 5565 5566 for (tc = 0; tc < ha->num_tc; tc++) { 5567 txq_array_index = tc * ha->num_rss + rss_id; 5568 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5569 fp->txq[tc]->index = txq_array_index; 5570 } 5571 5572 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5573 rss_id); 5574 5575 fp->tx_ring_full = 0; 5576 5577 /* reset all the statistics counters */ 5578 5579 fp->tx_pkts_processed = 0; 5580 fp->tx_pkts_freed = 0; 5581 fp->tx_pkts_transmitted = 0; 5582 fp->tx_pkts_completed = 0; 5583 5584 #ifdef QLNX_TRACE_PERF_DATA 5585 fp->tx_pkts_trans_ctx = 0; 5586 fp->tx_pkts_compl_ctx = 0; 5587 fp->tx_pkts_trans_fp = 0; 5588 fp->tx_pkts_compl_fp = 0; 5589 fp->tx_pkts_compl_intr = 0; 5590 #endif 5591 fp->tx_lso_wnd_min_len = 0; 5592 fp->tx_defrag = 0; 5593 fp->tx_nsegs_gt_elem_left = 0; 5594 fp->tx_tso_max_nsegs = 0; 5595 fp->tx_tso_min_nsegs = 0; 5596 fp->err_tx_nsegs_gt_elem_left = 0; 5597 fp->err_tx_dmamap_create = 0; 5598 fp->err_tx_defrag_dmamap_load = 0; 5599 fp->err_tx_non_tso_max_seg = 0; 5600 fp->err_tx_dmamap_load = 0; 5601 fp->err_tx_defrag = 0; 5602 fp->err_tx_free_pkt_null = 0; 5603 fp->err_tx_cons_idx_conflict = 0; 5604 5605 fp->rx_pkts = 0; 5606 fp->err_m_getcl = 0; 5607 fp->err_m_getjcl = 0; 5608 } 5609 return; 5610 } 5611 5612 static void 5613 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5614 { 5615 struct ecore_dev *cdev; 5616 5617 cdev = &ha->cdev; 5618 5619 if (sb_info->sb_virt) { 5620 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5621 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5622 sb_info->sb_virt = NULL; 5623 } 5624 } 5625 5626 static int 5627 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5628 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5629 { 5630 struct ecore_hwfn *p_hwfn; 5631 int hwfn_index, rc; 5632 u16 rel_sb_id; 5633 5634 hwfn_index = sb_id % cdev->num_hwfns; 5635 p_hwfn = &cdev->hwfns[hwfn_index]; 5636 rel_sb_id = sb_id / cdev->num_hwfns; 5637 5638 QL_DPRINT2(((qlnx_host_t *)cdev), 5639 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 5640 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5641 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5642 sb_virt_addr, (void *)sb_phy_addr); 5643 5644 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5645 sb_virt_addr, 
sb_phy_addr, rel_sb_id); 5646 5647 return rc; 5648 } 5649 5650 /* This function allocates fast-path status block memory */ 5651 static int 5652 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5653 { 5654 struct status_block_e4 *sb_virt; 5655 bus_addr_t sb_phys; 5656 int rc; 5657 uint32_t size; 5658 struct ecore_dev *cdev; 5659 5660 cdev = &ha->cdev; 5661 5662 size = sizeof(*sb_virt); 5663 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5664 5665 if (!sb_virt) { 5666 QL_DPRINT1(ha, "Status block allocation failed\n"); 5667 return -ENOMEM; 5668 } 5669 5670 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5671 if (rc) { 5672 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5673 } 5674 5675 return rc; 5676 } 5677 5678 static void 5679 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5680 { 5681 int i; 5682 struct sw_rx_data *rx_buf; 5683 5684 for (i = 0; i < rxq->num_rx_buffers; i++) { 5685 5686 rx_buf = &rxq->sw_rx_ring[i]; 5687 5688 if (rx_buf->data != NULL) { 5689 if (rx_buf->map != NULL) { 5690 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5691 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5692 rx_buf->map = NULL; 5693 } 5694 m_freem(rx_buf->data); 5695 rx_buf->data = NULL; 5696 } 5697 } 5698 return; 5699 } 5700 5701 static void 5702 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5703 { 5704 struct ecore_dev *cdev; 5705 int i; 5706 5707 cdev = &ha->cdev; 5708 5709 qlnx_free_rx_buffers(ha, rxq); 5710 5711 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5712 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5713 if (rxq->tpa_info[i].mpf != NULL) 5714 m_freem(rxq->tpa_info[i].mpf); 5715 } 5716 5717 bzero((void *)&rxq->sw_rx_ring[0], 5718 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5719 5720 /* Free the real RQ ring used by FW */ 5721 if (rxq->rx_bd_ring.p_virt_addr) { 5722 ecore_chain_free(cdev, &rxq->rx_bd_ring); 5723 rxq->rx_bd_ring.p_virt_addr = NULL; 5724 } 5725 5726 /* Free the real completion ring used by FW */ 5727 if (rxq->rx_comp_ring.p_virt_addr && 5728 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 5729 ecore_chain_free(cdev, &rxq->rx_comp_ring); 5730 rxq->rx_comp_ring.p_virt_addr = NULL; 5731 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 5732 } 5733 5734 #ifdef QLNX_SOFT_LRO 5735 { 5736 struct lro_ctrl *lro; 5737 5738 lro = &rxq->lro; 5739 tcp_lro_free(lro); 5740 } 5741 #endif /* #ifdef QLNX_SOFT_LRO */ 5742 5743 return; 5744 } 5745 5746 static int 5747 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5748 { 5749 register struct mbuf *mp; 5750 uint16_t rx_buf_size; 5751 struct sw_rx_data *sw_rx_data; 5752 struct eth_rx_bd *rx_bd; 5753 dma_addr_t dma_addr; 5754 bus_dmamap_t map; 5755 bus_dma_segment_t segs[1]; 5756 int nsegs; 5757 int ret; 5758 struct ecore_dev *cdev; 5759 5760 cdev = &ha->cdev; 5761 5762 rx_buf_size = rxq->rx_buf_size; 5763 5764 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5765 5766 if (mp == NULL) { 5767 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 5768 return -ENOMEM; 5769 } 5770 5771 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5772 5773 map = (bus_dmamap_t)0; 5774 5775 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5776 BUS_DMA_NOWAIT); 5777 dma_addr = segs[0].ds_addr; 5778 5779 if (ret || !dma_addr || (nsegs != 1)) { 5780 m_freem(mp); 5781 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5782 ret, (long long unsigned int)dma_addr, nsegs); 5783 return -ENOMEM; 5784 } 5785 5786 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 5787 
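	/* Record the mbuf, its bus address and DMA map at the producer index
	 * so the receive completion path can locate and sync this buffer
	 * later. */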
sw_rx_data->data = mp; 5788 sw_rx_data->dma_addr = dma_addr; 5789 sw_rx_data->map = map; 5790 5791 /* Advance PROD and get BD pointer */ 5792 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 5793 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 5794 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 5795 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5796 5797 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 5798 5799 return 0; 5800 } 5801 5802 static int 5803 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 5804 struct qlnx_agg_info *tpa) 5805 { 5806 struct mbuf *mp; 5807 dma_addr_t dma_addr; 5808 bus_dmamap_t map; 5809 bus_dma_segment_t segs[1]; 5810 int nsegs; 5811 int ret; 5812 struct sw_rx_data *rx_buf; 5813 5814 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 5815 5816 if (mp == NULL) { 5817 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 5818 return -ENOMEM; 5819 } 5820 5821 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 5822 5823 map = (bus_dmamap_t)0; 5824 5825 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 5826 BUS_DMA_NOWAIT); 5827 dma_addr = segs[0].ds_addr; 5828 5829 if (ret || !dma_addr || (nsegs != 1)) { 5830 m_freem(mp); 5831 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 5832 ret, (long long unsigned int)dma_addr, nsegs); 5833 return -ENOMEM; 5834 } 5835 5836 rx_buf = &tpa->rx_buf; 5837 5838 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 5839 5840 rx_buf->data = mp; 5841 rx_buf->dma_addr = dma_addr; 5842 rx_buf->map = map; 5843 5844 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 5845 5846 return (0); 5847 } 5848 5849 static void 5850 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 5851 { 5852 struct sw_rx_data *rx_buf; 5853 5854 rx_buf = &tpa->rx_buf; 5855 5856 if (rx_buf->data != NULL) { 5857 if (rx_buf->map != NULL) { 5858 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5859 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5860 rx_buf->map = NULL; 5861 } 5862 m_freem(rx_buf->data); 5863 rx_buf->data = NULL; 5864 } 5865 return; 5866 } 5867 5868 /* This function allocates all memory needed per Rx queue */ 5869 static int 5870 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5871 { 5872 int i, rc, num_allocated; 5873 struct ifnet *ifp; 5874 struct ecore_dev *cdev; 5875 5876 cdev = &ha->cdev; 5877 ifp = ha->ifp; 5878 5879 rxq->num_rx_buffers = RX_RING_SIZE; 5880 5881 rxq->rx_buf_size = ha->rx_buf_size; 5882 5883 /* Allocate the parallel driver ring for Rx buffers */ 5884 bzero((void *)&rxq->sw_rx_ring[0], 5885 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5886 5887 /* Allocate FW Rx ring */ 5888 5889 rc = ecore_chain_alloc(cdev, 5890 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 5891 ECORE_CHAIN_MODE_NEXT_PTR, 5892 ECORE_CHAIN_CNT_TYPE_U16, 5893 RX_RING_SIZE, 5894 sizeof(struct eth_rx_bd), 5895 &rxq->rx_bd_ring, NULL); 5896 5897 if (rc) 5898 goto err; 5899 5900 /* Allocate FW completion ring */ 5901 rc = ecore_chain_alloc(cdev, 5902 ECORE_CHAIN_USE_TO_CONSUME, 5903 ECORE_CHAIN_MODE_PBL, 5904 ECORE_CHAIN_CNT_TYPE_U16, 5905 RX_RING_SIZE, 5906 sizeof(union eth_rx_cqe), 5907 &rxq->rx_comp_ring, NULL); 5908 5909 if (rc) 5910 goto err; 5911 5912 /* Allocate buffers for the Rx ring */ 5913 5914 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5915 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 5916 &rxq->tpa_info[i]); 5917 if (rc) 5918 break; 5919 5920 } 5921 5922 for (i = 0; i < rxq->num_rx_buffers; i++) { 5923 rc = qlnx_alloc_rx_buffer(ha, rxq); 5924 if (rc) 5925 break; 5926 } 5927 
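	/* 'i' now holds the number of receive buffers actually posted; a
	 * partially filled ring is tolerated below, but an empty ring is
	 * treated as a fatal allocation failure. */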
num_allocated = i; 5928 if (!num_allocated) { 5929 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 5930 goto err; 5931 } else if (num_allocated < rxq->num_rx_buffers) { 5932 QL_DPRINT1(ha, "Allocated less buffers than" 5933 " desired (%d allocated)\n", num_allocated); 5934 } 5935 5936 #ifdef QLNX_SOFT_LRO 5937 5938 { 5939 struct lro_ctrl *lro; 5940 5941 lro = &rxq->lro; 5942 5943 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5944 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 5945 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 5946 rxq->rxq_id); 5947 goto err; 5948 } 5949 #else 5950 if (tcp_lro_init(lro)) { 5951 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 5952 rxq->rxq_id); 5953 goto err; 5954 } 5955 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5956 5957 lro->ifp = ha->ifp; 5958 } 5959 #endif /* #ifdef QLNX_SOFT_LRO */ 5960 return 0; 5961 5962 err: 5963 qlnx_free_mem_rxq(ha, rxq); 5964 return -ENOMEM; 5965 } 5966 5967 5968 static void 5969 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5970 struct qlnx_tx_queue *txq) 5971 { 5972 struct ecore_dev *cdev; 5973 5974 cdev = &ha->cdev; 5975 5976 bzero((void *)&txq->sw_tx_ring[0], 5977 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 5978 5979 /* Free the real RQ ring used by FW */ 5980 if (txq->tx_pbl.p_virt_addr) { 5981 ecore_chain_free(cdev, &txq->tx_pbl); 5982 txq->tx_pbl.p_virt_addr = NULL; 5983 } 5984 return; 5985 } 5986 5987 /* This function allocates all memory needed per Tx queue */ 5988 static int 5989 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 5990 struct qlnx_tx_queue *txq) 5991 { 5992 int ret = ECORE_SUCCESS; 5993 union eth_tx_bd_types *p_virt; 5994 struct ecore_dev *cdev; 5995 5996 cdev = &ha->cdev; 5997 5998 bzero((void *)&txq->sw_tx_ring[0], 5999 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6000 6001 /* Allocate the real Tx ring to be used by FW */ 6002 ret = ecore_chain_alloc(cdev, 6003 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6004 ECORE_CHAIN_MODE_PBL, 6005 ECORE_CHAIN_CNT_TYPE_U16, 6006 TX_RING_SIZE, 6007 sizeof(*p_virt), 6008 &txq->tx_pbl, NULL); 6009 6010 if (ret != ECORE_SUCCESS) { 6011 goto err; 6012 } 6013 6014 txq->num_tx_buffers = TX_RING_SIZE; 6015 6016 return 0; 6017 6018 err: 6019 qlnx_free_mem_txq(ha, fp, txq); 6020 return -ENOMEM; 6021 } 6022 6023 static void 6024 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6025 { 6026 struct mbuf *mp; 6027 struct ifnet *ifp = ha->ifp; 6028 6029 if (mtx_initialized(&fp->tx_mtx)) { 6030 6031 if (fp->tx_br != NULL) { 6032 6033 mtx_lock(&fp->tx_mtx); 6034 6035 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6036 fp->tx_pkts_freed++; 6037 m_freem(mp); 6038 } 6039 6040 mtx_unlock(&fp->tx_mtx); 6041 6042 buf_ring_free(fp->tx_br, M_DEVBUF); 6043 fp->tx_br = NULL; 6044 } 6045 mtx_destroy(&fp->tx_mtx); 6046 } 6047 return; 6048 } 6049 6050 static void 6051 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6052 { 6053 int tc; 6054 6055 qlnx_free_mem_sb(ha, fp->sb_info); 6056 6057 qlnx_free_mem_rxq(ha, fp->rxq); 6058 6059 for (tc = 0; tc < ha->num_tc; tc++) 6060 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6061 6062 return; 6063 } 6064 6065 static int 6066 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6067 { 6068 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6069 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6070 6071 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6072 6073 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6074 M_NOWAIT, &fp->tx_mtx); 
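	/* The buf_ring backs the per-queue software transmit queue used by
	 * the multiqueue transmit path; it is drained and freed under tx_mtx
	 * in qlnx_free_tx_br() above. */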
6075 if (fp->tx_br == NULL) { 6076 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6077 ha->dev_unit, fp->rss_id); 6078 return -ENOMEM; 6079 } 6080 return 0; 6081 } 6082 6083 static int 6084 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6085 { 6086 int rc, tc; 6087 6088 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6089 if (rc) 6090 goto err; 6091 6092 if (ha->rx_jumbo_buf_eq_mtu) { 6093 if (ha->max_frame_size <= MCLBYTES) 6094 ha->rx_buf_size = MCLBYTES; 6095 else if (ha->max_frame_size <= MJUMPAGESIZE) 6096 ha->rx_buf_size = MJUMPAGESIZE; 6097 else if (ha->max_frame_size <= MJUM9BYTES) 6098 ha->rx_buf_size = MJUM9BYTES; 6099 else if (ha->max_frame_size <= MJUM16BYTES) 6100 ha->rx_buf_size = MJUM16BYTES; 6101 } else { 6102 if (ha->max_frame_size <= MCLBYTES) 6103 ha->rx_buf_size = MCLBYTES; 6104 else 6105 ha->rx_buf_size = MJUMPAGESIZE; 6106 } 6107 6108 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6109 if (rc) 6110 goto err; 6111 6112 for (tc = 0; tc < ha->num_tc; tc++) { 6113 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6114 if (rc) 6115 goto err; 6116 } 6117 6118 return 0; 6119 6120 err: 6121 qlnx_free_mem_fp(ha, fp); 6122 return -ENOMEM; 6123 } 6124 6125 static void 6126 qlnx_free_mem_load(qlnx_host_t *ha) 6127 { 6128 int i; 6129 struct ecore_dev *cdev; 6130 6131 cdev = &ha->cdev; 6132 6133 for (i = 0; i < ha->num_rss; i++) { 6134 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6135 6136 qlnx_free_mem_fp(ha, fp); 6137 } 6138 return; 6139 } 6140 6141 static int 6142 qlnx_alloc_mem_load(qlnx_host_t *ha) 6143 { 6144 int rc = 0, rss_id; 6145 6146 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6147 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6148 6149 rc = qlnx_alloc_mem_fp(ha, fp); 6150 if (rc) 6151 break; 6152 } 6153 return (rc); 6154 } 6155 6156 static int 6157 qlnx_start_vport(struct ecore_dev *cdev, 6158 u8 vport_id, 6159 u16 mtu, 6160 u8 drop_ttl0_flg, 6161 u8 inner_vlan_removal_en_flg, 6162 u8 tx_switching, 6163 u8 hw_lro_enable) 6164 { 6165 int rc, i; 6166 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6167 qlnx_host_t *ha; 6168 6169 ha = (qlnx_host_t *)cdev; 6170 6171 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6172 vport_start_params.tx_switching = 0; 6173 vport_start_params.handle_ptp_pkts = 0; 6174 vport_start_params.only_untagged = 0; 6175 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6176 6177 vport_start_params.tpa_mode = 6178 (hw_lro_enable ? 
ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6179 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6180 6181 vport_start_params.vport_id = vport_id; 6182 vport_start_params.mtu = mtu; 6183 6184 6185 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6186 6187 for_each_hwfn(cdev, i) { 6188 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6189 6190 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6191 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6192 6193 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6194 6195 if (rc) { 6196 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6197 " with MTU %d\n" , vport_id, mtu); 6198 return -ENOMEM; 6199 } 6200 6201 ecore_hw_start_fastpath(p_hwfn); 6202 6203 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6204 vport_id, mtu); 6205 } 6206 return 0; 6207 } 6208 6209 6210 static int 6211 qlnx_update_vport(struct ecore_dev *cdev, 6212 struct qlnx_update_vport_params *params) 6213 { 6214 struct ecore_sp_vport_update_params sp_params; 6215 int rc, i, j, fp_index; 6216 struct ecore_hwfn *p_hwfn; 6217 struct ecore_rss_params *rss; 6218 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6219 struct qlnx_fastpath *fp; 6220 6221 memset(&sp_params, 0, sizeof(sp_params)); 6222 /* Translate protocol params into sp params */ 6223 sp_params.vport_id = params->vport_id; 6224 6225 sp_params.update_vport_active_rx_flg = 6226 params->update_vport_active_rx_flg; 6227 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6228 6229 sp_params.update_vport_active_tx_flg = 6230 params->update_vport_active_tx_flg; 6231 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6232 6233 sp_params.update_inner_vlan_removal_flg = 6234 params->update_inner_vlan_removal_flg; 6235 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6236 6237 sp_params.sge_tpa_params = params->sge_tpa_params; 6238 6239 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6240 * We need to re-fix the rss values per engine for CMT. 
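	 *
	 * In CMT mode (num_hwfns > 1) the loop below rebuilds each entry j of
	 * the RSS indirection table from the rx queue handle of fastpath
	 * ((num_hwfns * j) + i) % num_rss before the vport update is issued
	 * on engine i, i.e. the queues are re-spread per engine.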
6241 */ 6242 if (params->rss_params->update_rss_config) 6243 sp_params.rss_params = params->rss_params; 6244 else 6245 sp_params.rss_params = NULL; 6246 6247 for_each_hwfn(cdev, i) { 6248 6249 p_hwfn = &cdev->hwfns[i]; 6250 6251 if ((cdev->num_hwfns > 1) && 6252 params->rss_params->update_rss_config && 6253 params->rss_params->rss_enable) { 6254 6255 rss = params->rss_params; 6256 6257 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6258 6259 fp_index = ((cdev->num_hwfns * j) + i) % 6260 ha->num_rss; 6261 6262 fp = &ha->fp_array[fp_index]; 6263 rss->rss_ind_table[j] = fp->rxq->handle; 6264 } 6265 6266 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6267 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6268 rss->rss_ind_table[j], 6269 rss->rss_ind_table[j+1], 6270 rss->rss_ind_table[j+2], 6271 rss->rss_ind_table[j+3], 6272 rss->rss_ind_table[j+4], 6273 rss->rss_ind_table[j+5], 6274 rss->rss_ind_table[j+6], 6275 rss->rss_ind_table[j+7]); 6276 j += 8; 6277 } 6278 } 6279 6280 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6281 6282 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6283 6284 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6285 ECORE_SPQ_MODE_EBLOCK, NULL); 6286 if (rc) { 6287 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6288 return rc; 6289 } 6290 6291 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6292 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6293 params->vport_id, params->vport_active_tx_flg, 6294 params->vport_active_rx_flg, 6295 params->update_vport_active_tx_flg, 6296 params->update_vport_active_rx_flg); 6297 } 6298 6299 return 0; 6300 } 6301 6302 static void 6303 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6304 { 6305 struct eth_rx_bd *rx_bd_cons = 6306 ecore_chain_consume(&rxq->rx_bd_ring); 6307 struct eth_rx_bd *rx_bd_prod = 6308 ecore_chain_produce(&rxq->rx_bd_ring); 6309 struct sw_rx_data *sw_rx_data_cons = 6310 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6311 struct sw_rx_data *sw_rx_data_prod = 6312 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6313 6314 sw_rx_data_prod->data = sw_rx_data_cons->data; 6315 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6316 6317 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6318 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6319 6320 return; 6321 } 6322 6323 static void 6324 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6325 { 6326 6327 uint16_t bd_prod; 6328 uint16_t cqe_prod; 6329 union { 6330 struct eth_rx_prod_data rx_prod_data; 6331 uint32_t data32; 6332 } rx_prods; 6333 6334 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6335 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6336 6337 /* Update producers */ 6338 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6339 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6340 6341 /* Make sure that the BD and SGE data is updated before updating the 6342 * producers since FW might read the BD/SGE right after the producer 6343 * is updated. 6344 */ 6345 wmb(); 6346 6347 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6348 sizeof(rx_prods), &rx_prods.data32); 6349 6350 /* mmiowb is needed to synchronize doorbell writes from more than one 6351 * processor. It guarantees that the write arrives to the device before 6352 * the napi lock is released and another qlnx_poll is called (possibly 6353 * on another CPU). Without this barrier, the next doorbell can bypass 6354 * this doorbell. This is applicable to IA64/Altix systems. 
6355 */ 6356 wmb(); 6357 6358 return; 6359 } 6360 6361 static uint32_t qlnx_hash_key[] = { 6362 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6363 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6364 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6365 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6366 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6367 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6368 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6369 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6370 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6371 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6372 6373 static int 6374 qlnx_start_queues(qlnx_host_t *ha) 6375 { 6376 int rc, tc, i, vport_id = 0, 6377 drop_ttl0_flg = 1, vlan_removal_en = 1, 6378 tx_switching = 0, hw_lro_enable = 0; 6379 struct ecore_dev *cdev = &ha->cdev; 6380 struct ecore_rss_params *rss_params = &ha->rss_params; 6381 struct qlnx_update_vport_params vport_update_params; 6382 struct ifnet *ifp; 6383 struct ecore_hwfn *p_hwfn; 6384 struct ecore_sge_tpa_params tpa_params; 6385 struct ecore_queue_start_common_params qparams; 6386 struct qlnx_fastpath *fp; 6387 6388 ifp = ha->ifp; 6389 6390 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6391 6392 if (!ha->num_rss) { 6393 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6394 " are no Rx queues\n"); 6395 return -EINVAL; 6396 } 6397 6398 #ifndef QLNX_SOFT_LRO 6399 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6400 #endif /* #ifndef QLNX_SOFT_LRO */ 6401 6402 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6403 vlan_removal_en, tx_switching, hw_lro_enable); 6404 6405 if (rc) { 6406 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6407 return rc; 6408 } 6409 6410 QL_DPRINT2(ha, "Start vport ramrod passed, " 6411 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6412 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 6413 6414 for_each_rss(i) { 6415 struct ecore_rxq_start_ret_params rx_ret_params; 6416 struct ecore_txq_start_ret_params tx_ret_params; 6417 6418 fp = &ha->fp_array[i]; 6419 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6420 6421 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6422 bzero(&rx_ret_params, 6423 sizeof (struct ecore_rxq_start_ret_params)); 6424 6425 qparams.queue_id = i ; 6426 qparams.vport_id = vport_id; 6427 qparams.stats_id = vport_id; 6428 qparams.p_sb = fp->sb_info; 6429 qparams.sb_idx = RX_PI; 6430 6431 6432 rc = ecore_eth_rx_queue_start(p_hwfn, 6433 p_hwfn->hw_info.opaque_fid, 6434 &qparams, 6435 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6436 /* bd_chain_phys_addr */ 6437 fp->rxq->rx_bd_ring.p_phys_addr, 6438 /* cqe_pbl_addr */ 6439 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6440 /* cqe_pbl_size */ 6441 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6442 &rx_ret_params); 6443 6444 if (rc) { 6445 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6446 return rc; 6447 } 6448 6449 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6450 fp->rxq->handle = rx_ret_params.p_handle; 6451 fp->rxq->hw_cons_ptr = 6452 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6453 6454 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6455 6456 for (tc = 0; tc < ha->num_tc; tc++) { 6457 struct qlnx_tx_queue *txq = fp->txq[tc]; 6458 6459 bzero(&qparams, 6460 sizeof(struct ecore_queue_start_common_params)); 6461 bzero(&tx_ret_params, 6462 sizeof (struct ecore_txq_start_ret_params)); 6463 6464 qparams.queue_id = txq->index / cdev->num_hwfns ; 6465 qparams.vport_id = vport_id; 6466 qparams.stats_id = vport_id; 6467 qparams.p_sb = 
fp->sb_info; 6468 qparams.sb_idx = TX_PI(tc); 6469 6470 rc = ecore_eth_tx_queue_start(p_hwfn, 6471 p_hwfn->hw_info.opaque_fid, 6472 &qparams, tc, 6473 /* bd_chain_phys_addr */ 6474 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6475 ecore_chain_get_page_cnt(&txq->tx_pbl), 6476 &tx_ret_params); 6477 6478 if (rc) { 6479 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6480 txq->index, rc); 6481 return rc; 6482 } 6483 6484 txq->doorbell_addr = tx_ret_params.p_doorbell; 6485 txq->handle = tx_ret_params.p_handle; 6486 6487 txq->hw_cons_ptr = 6488 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6489 SET_FIELD(txq->tx_db.data.params, 6490 ETH_DB_DATA_DEST, DB_DEST_XCM); 6491 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6492 DB_AGG_CMD_SET); 6493 SET_FIELD(txq->tx_db.data.params, 6494 ETH_DB_DATA_AGG_VAL_SEL, 6495 DQ_XCM_ETH_TX_BD_PROD_CMD); 6496 6497 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6498 } 6499 } 6500 6501 /* Fill struct with RSS params */ 6502 if (ha->num_rss > 1) { 6503 6504 rss_params->update_rss_config = 1; 6505 rss_params->rss_enable = 1; 6506 rss_params->update_rss_capabilities = 1; 6507 rss_params->update_rss_ind_table = 1; 6508 rss_params->update_rss_key = 1; 6509 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6510 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6511 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6512 6513 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6514 fp = &ha->fp_array[(i % ha->num_rss)]; 6515 rss_params->rss_ind_table[i] = fp->rxq->handle; 6516 } 6517 6518 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6519 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6520 6521 } else { 6522 memset(rss_params, 0, sizeof(*rss_params)); 6523 } 6524 6525 6526 /* Prepare and send the vport enable */ 6527 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6528 vport_update_params.vport_id = vport_id; 6529 vport_update_params.update_vport_active_tx_flg = 1; 6530 vport_update_params.vport_active_tx_flg = 1; 6531 vport_update_params.update_vport_active_rx_flg = 1; 6532 vport_update_params.vport_active_rx_flg = 1; 6533 vport_update_params.rss_params = rss_params; 6534 vport_update_params.update_inner_vlan_removal_flg = 1; 6535 vport_update_params.inner_vlan_removal_flg = 1; 6536 6537 if (hw_lro_enable) { 6538 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6539 6540 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6541 6542 tpa_params.update_tpa_en_flg = 1; 6543 tpa_params.tpa_ipv4_en_flg = 1; 6544 tpa_params.tpa_ipv6_en_flg = 1; 6545 6546 tpa_params.update_tpa_param_flg = 1; 6547 tpa_params.tpa_pkt_split_flg = 0; 6548 tpa_params.tpa_hdr_data_split_flg = 0; 6549 tpa_params.tpa_gro_consistent_flg = 0; 6550 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6551 tpa_params.tpa_max_size = (uint16_t)(-1); 6552 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6553 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6554 6555 vport_update_params.sge_tpa_params = &tpa_params; 6556 } 6557 6558 rc = qlnx_update_vport(cdev, &vport_update_params); 6559 if (rc) { 6560 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6561 return rc; 6562 } 6563 6564 return 0; 6565 } 6566 6567 static int 6568 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6569 struct qlnx_tx_queue *txq) 6570 { 6571 uint16_t hw_bd_cons; 6572 uint16_t ecore_cons_idx; 6573 6574 QL_DPRINT2(ha, "enter\n"); 6575 6576 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6577 6578 while (hw_bd_cons != 6579 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6580 6581 
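        /*
         * Reap completed Tx BDs under the fastpath Tx lock until the
         * hardware consumer index catches up with the chain consumer.
         * Note that there is no iteration cap here, so the loop relies
         * on the queue eventually draining between the brief delays.
         */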
mtx_lock(&fp->tx_mtx); 6582 6583 (void)qlnx_tx_int(ha, fp, txq); 6584 6585 mtx_unlock(&fp->tx_mtx); 6586 6587 qlnx_mdelay(__func__, 2); 6588 6589 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6590 } 6591 6592 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6593 6594 return 0; 6595 } 6596 6597 static int 6598 qlnx_stop_queues(qlnx_host_t *ha) 6599 { 6600 struct qlnx_update_vport_params vport_update_params; 6601 struct ecore_dev *cdev; 6602 struct qlnx_fastpath *fp; 6603 int rc, tc, i; 6604 6605 cdev = &ha->cdev; 6606 6607 /* Disable the vport */ 6608 6609 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6610 6611 vport_update_params.vport_id = 0; 6612 vport_update_params.update_vport_active_tx_flg = 1; 6613 vport_update_params.vport_active_tx_flg = 0; 6614 vport_update_params.update_vport_active_rx_flg = 1; 6615 vport_update_params.vport_active_rx_flg = 0; 6616 vport_update_params.rss_params = &ha->rss_params; 6617 vport_update_params.rss_params->update_rss_config = 0; 6618 vport_update_params.rss_params->rss_enable = 0; 6619 vport_update_params.update_inner_vlan_removal_flg = 0; 6620 vport_update_params.inner_vlan_removal_flg = 0; 6621 6622 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6623 6624 rc = qlnx_update_vport(cdev, &vport_update_params); 6625 if (rc) { 6626 QL_DPRINT1(ha, "Failed to update vport\n"); 6627 return rc; 6628 } 6629 6630 /* Flush Tx queues. If needed, request drain from MCP */ 6631 for_each_rss(i) { 6632 fp = &ha->fp_array[i]; 6633 6634 for (tc = 0; tc < ha->num_tc; tc++) { 6635 struct qlnx_tx_queue *txq = fp->txq[tc]; 6636 6637 rc = qlnx_drain_txq(ha, fp, txq); 6638 if (rc) 6639 return rc; 6640 } 6641 } 6642 6643 /* Stop all Queues in reverse order*/ 6644 for (i = ha->num_rss - 1; i >= 0; i--) { 6645 6646 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6647 6648 fp = &ha->fp_array[i]; 6649 6650 /* Stop the Tx Queue(s)*/ 6651 for (tc = 0; tc < ha->num_tc; tc++) { 6652 int tx_queue_id; 6653 6654 tx_queue_id = tc * ha->num_rss + i; 6655 rc = ecore_eth_tx_queue_stop(p_hwfn, 6656 fp->txq[tc]->handle); 6657 6658 if (rc) { 6659 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 6660 tx_queue_id); 6661 return rc; 6662 } 6663 } 6664 6665 /* Stop the Rx Queue*/ 6666 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6667 false); 6668 if (rc) { 6669 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 6670 return rc; 6671 } 6672 } 6673 6674 /* Stop the vport */ 6675 for_each_hwfn(cdev, i) { 6676 6677 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6678 6679 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6680 6681 if (rc) { 6682 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 6683 return rc; 6684 } 6685 } 6686 6687 return rc; 6688 } 6689 6690 static int 6691 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6692 enum ecore_filter_opcode opcode, 6693 unsigned char mac[ETH_ALEN]) 6694 { 6695 struct ecore_filter_ucast ucast; 6696 struct ecore_dev *cdev; 6697 int rc; 6698 6699 cdev = &ha->cdev; 6700 6701 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6702 6703 ucast.opcode = opcode; 6704 ucast.type = ECORE_FILTER_MAC; 6705 ucast.is_rx_filter = 1; 6706 ucast.vport_to_add_to = 0; 6707 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6708 6709 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6710 6711 return (rc); 6712 } 6713 6714 static int 6715 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6716 { 6717 struct ecore_filter_ucast ucast; 6718 struct ecore_dev *cdev; 6719 int rc; 6720 6721 bzero(&ucast, sizeof(struct 
ecore_filter_ucast));
6722
6723     ucast.opcode = ECORE_FILTER_REPLACE;
6724     ucast.type = ECORE_FILTER_MAC;
6725     ucast.is_rx_filter = 1;
6726
6727     cdev = &ha->cdev;
6728
6729     rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL);
6730
6731     return (rc);
6732 }
6733
6734 static int
6735 qlnx_remove_all_mcast_mac(qlnx_host_t *ha)
6736 {
6737     struct ecore_filter_mcast *mcast;
6738     struct ecore_dev *cdev;
6739     int rc, i;
6740
6741     cdev = &ha->cdev;
6742
6743     mcast = &ha->ecore_mcast;
6744     bzero(mcast, sizeof(struct ecore_filter_mcast));
6745
6746     mcast->opcode = ECORE_FILTER_REMOVE;
6747
6748     for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) {
6749
6750         if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] ||
6751             ha->mcast[i].addr[2] || ha->mcast[i].addr[3] ||
6752             ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) {
6753
6754             memcpy(&mcast->mac[mcast->num_mc_addrs], &ha->mcast[i].addr[0], ETH_ALEN);
6755             mcast->num_mc_addrs++;
6756         }
6757     }
6758
6759
6760     rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL);
6761
6762     bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS));
6763     ha->nmcast = 0;
6764
6765     return (rc);
6766 }
6767
6768 static int
6769 qlnx_clean_filters(qlnx_host_t *ha)
6770 {
6771     int rc = 0;
6772
6773     /* Remove all unicast macs */
6774     rc = qlnx_remove_all_ucast_mac(ha);
6775     if (rc)
6776         return rc;
6777
6778     /* Remove all multicast macs */
6779     rc = qlnx_remove_all_mcast_mac(ha);
6780     if (rc)
6781         return rc;
6782
6783     rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac);
6784
6785     return (rc);
6786 }
6787
6788 static int
6789 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter)
6790 {
6791     struct ecore_filter_accept_flags accept;
6792     int rc = 0;
6793     struct ecore_dev *cdev;
6794
6795     cdev = &ha->cdev;
6796
6797     bzero(&accept, sizeof(struct ecore_filter_accept_flags));
6798
6799     accept.update_rx_mode_config = 1;
6800     accept.rx_accept_filter = filter;
6801
6802     accept.update_tx_mode_config = 1;
6803     accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
6804         ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
6805
6806     rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false,
6807             ECORE_SPQ_MODE_CB, NULL);
6808
6809     return (rc);
6810 }
6811
6812 static int
6813 qlnx_set_rx_mode(qlnx_host_t *ha)
6814 {
6815     int rc = 0;
6816     uint8_t filter;
6817
6818     rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
6819     if (rc)
6820         return rc;
6821
6822     rc = qlnx_remove_all_mcast_mac(ha);
6823     if (rc)
6824         return rc;
6825
6826     filter = ECORE_ACCEPT_UCAST_MATCHED |
6827         ECORE_ACCEPT_MCAST_MATCHED |
6828         ECORE_ACCEPT_BCAST;
6829     ha->filter = filter;
6830
6831     rc = qlnx_set_rx_accept_filter(ha, filter);
6832
6833     return (rc);
6834 }
6835
6836 static int
6837 qlnx_set_link(qlnx_host_t *ha, bool link_up)
6838 {
6839     int i, rc = 0;
6840     struct ecore_dev *cdev;
6841     struct ecore_hwfn *hwfn;
6842     struct ecore_ptt *ptt;
6843
6844     cdev = &ha->cdev;
6845
6846     for_each_hwfn(cdev, i) {
6847
6848         hwfn = &cdev->hwfns[i];
6849
6850         ptt = ecore_ptt_acquire(hwfn);
6851         if (!ptt)
6852             return -EBUSY;
6853
6854         rc = ecore_mcp_set_link(hwfn, ptt, link_up);
6855
6856         ecore_ptt_release(hwfn, ptt);
6857
6858         if (rc)
6859             return rc;
6860     }
6861     return (rc);
6862 }
6863
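/*
 * if_get_counter handler (FreeBSD 11 and later): report the ecore vport
 * statistics, which qlnx_timer() refreshes once a second, as generic
 * ifnet counters.
 */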
6864 #if __FreeBSD_version >= 1100000
6865 static uint64_t
6866 qlnx_get_counter(if_t ifp, ift_counter cnt)
6867 {
6868     qlnx_host_t *ha;
6869     uint64_t count;
6870
6871     ha = (qlnx_host_t *)if_getsoftc(ifp);
6872
6873     switch (cnt) {
6874
6875     case IFCOUNTER_IPACKETS:
6876         count = ha->hw_stats.common.rx_ucast_pkts +
6877             ha->hw_stats.common.rx_mcast_pkts +
6878             ha->hw_stats.common.rx_bcast_pkts;
6879         break;
6880
6881     case IFCOUNTER_IERRORS:
6882         count = ha->hw_stats.common.rx_crc_errors +
6883             ha->hw_stats.common.rx_align_errors +
6884             ha->hw_stats.common.rx_oversize_packets +
6885             ha->hw_stats.common.rx_undersize_packets;
6886         break;
6887
6888     case IFCOUNTER_OPACKETS:
6889         count = ha->hw_stats.common.tx_ucast_pkts +
6890             ha->hw_stats.common.tx_mcast_pkts +
6891             ha->hw_stats.common.tx_bcast_pkts;
6892         break;
6893
6894     case IFCOUNTER_OERRORS:
6895         count = ha->hw_stats.common.tx_err_drop_pkts;
6896         break;
6897
6898     case IFCOUNTER_COLLISIONS:
6899         return (0);
6900
6901     case IFCOUNTER_IBYTES:
6902         count = ha->hw_stats.common.rx_ucast_bytes +
6903             ha->hw_stats.common.rx_mcast_bytes +
6904             ha->hw_stats.common.rx_bcast_bytes;
6905         break;
6906
6907     case IFCOUNTER_OBYTES:
6908         count = ha->hw_stats.common.tx_ucast_bytes +
6909             ha->hw_stats.common.tx_mcast_bytes +
6910             ha->hw_stats.common.tx_bcast_bytes;
6911         break;
6912
6913     case IFCOUNTER_IMCASTS:
6914         count = ha->hw_stats.common.rx_mcast_pkts;
6915         break;
6916
6917     case IFCOUNTER_OMCASTS:
6918         count = ha->hw_stats.common.tx_mcast_pkts;
6919         break;
6920
6921     case IFCOUNTER_IQDROPS:
6922     case IFCOUNTER_OQDROPS:
6923     case IFCOUNTER_NOPROTO:
6924
6925     default:
6926         return (if_get_counter_default(ifp, cnt));
6927     }
6928     return (count);
6929 }
6930 #endif
6931
6932
6933 static void
6934 qlnx_timer(void *arg)
6935 {
6936     qlnx_host_t *ha;
6937
6938     ha = (qlnx_host_t *)arg;
6939
6940     ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
6941
6942     if (ha->storm_stats_gather)
6943         qlnx_sample_storm_stats(ha);
6944
6945     callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
6946
6947     return;
6948 }
6949
6950 static int
6951 qlnx_load(qlnx_host_t *ha)
6952 {
6953     int i;
6954     int rc = 0;
6955     struct ecore_dev *cdev;
6956     device_t dev;
6957
6958     cdev = &ha->cdev;
6959     dev = ha->pci_dev;
6960
6961     QL_DPRINT2(ha, "enter\n");
6962
6963     rc = qlnx_alloc_mem_arrays(ha);
6964     if (rc)
6965         goto qlnx_load_exit0;
6966
6967     qlnx_init_fp(ha);
6968
6969     rc = qlnx_alloc_mem_load(ha);
6970     if (rc)
6971         goto qlnx_load_exit1;
6972
6973     QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
6974         ha->num_rss, ha->num_tc);
6975
6976     for (i = 0; i < ha->num_rss; i++) {
6977
6978         if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
6979             (INTR_TYPE_NET | INTR_MPSAFE),
6980             NULL, qlnx_fp_isr, &ha->irq_vec[i],
6981             &ha->irq_vec[i].handle))) {
6982
6983             QL_DPRINT1(ha, "could not setup interrupt\n");
6984             goto qlnx_load_exit2;
6985         }
6986
6987         QL_DPRINT2(ha, "rss_id = %d irq_rid %d "
6988             "irq %p handle %p\n", i,
6989             ha->irq_vec[i].irq_rid,
6990             ha->irq_vec[i].irq, ha->irq_vec[i].handle);
6991
6992         bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
6993     }
6994
6995     rc = qlnx_start_queues(ha);
6996     if (rc)
6997         goto qlnx_load_exit2;
6998
6999     QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n");
7000
7001     /* Add primary mac and set Rx filters */
7002     rc = qlnx_set_rx_mode(ha);
7003     if (rc)
7004         goto qlnx_load_exit2;
7005
7006     /* Ask for link-up using current configuration */
7007     qlnx_set_link(ha, true);
7008
7009     ha->state = QLNX_STATE_OPEN;
7010
7011     bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats));
7012
7013     if (ha->flags.callout_init)
7014         callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7015
7016     goto qlnx_load_exit0;
7017
7018 qlnx_load_exit2:
7019     qlnx_free_mem_load(ha);
7020
7021 qlnx_load_exit1:
7022     ha->num_rss = 0;
7023
7024 qlnx_load_exit0:
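    /* common exit: rc is zero only when every step above succeeded */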
7025 QL_DPRINT2(ha, "exit [%d]\n", rc); 7026 return rc; 7027 } 7028 7029 static void 7030 qlnx_drain_soft_lro(qlnx_host_t *ha) 7031 { 7032 #ifdef QLNX_SOFT_LRO 7033 7034 struct ifnet *ifp; 7035 int i; 7036 7037 ifp = ha->ifp; 7038 7039 7040 if (ifp->if_capenable & IFCAP_LRO) { 7041 7042 for (i = 0; i < ha->num_rss; i++) { 7043 7044 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7045 struct lro_ctrl *lro; 7046 7047 lro = &fp->rxq->lro; 7048 7049 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 7050 7051 tcp_lro_flush_all(lro); 7052 7053 #else 7054 struct lro_entry *queued; 7055 7056 while ((!SLIST_EMPTY(&lro->lro_active))){ 7057 queued = SLIST_FIRST(&lro->lro_active); 7058 SLIST_REMOVE_HEAD(&lro->lro_active, next); 7059 tcp_lro_flush(lro, queued); 7060 } 7061 7062 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 7063 7064 } 7065 } 7066 7067 #endif /* #ifdef QLNX_SOFT_LRO */ 7068 7069 return; 7070 } 7071 7072 static void 7073 qlnx_unload(qlnx_host_t *ha) 7074 { 7075 struct ecore_dev *cdev; 7076 device_t dev; 7077 int i; 7078 7079 cdev = &ha->cdev; 7080 dev = ha->pci_dev; 7081 7082 QL_DPRINT2(ha, "enter\n"); 7083 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7084 7085 if (ha->state == QLNX_STATE_OPEN) { 7086 7087 qlnx_set_link(ha, false); 7088 qlnx_clean_filters(ha); 7089 qlnx_stop_queues(ha); 7090 ecore_hw_stop_fastpath(cdev); 7091 7092 for (i = 0; i < ha->num_rss; i++) { 7093 if (ha->irq_vec[i].handle) { 7094 (void)bus_teardown_intr(dev, 7095 ha->irq_vec[i].irq, 7096 ha->irq_vec[i].handle); 7097 ha->irq_vec[i].handle = NULL; 7098 } 7099 } 7100 7101 qlnx_drain_fp_taskqueues(ha); 7102 qlnx_drain_soft_lro(ha); 7103 qlnx_free_mem_load(ha); 7104 } 7105 7106 if (ha->flags.callout_init) 7107 callout_drain(&ha->qlnx_callout); 7108 7109 qlnx_mdelay(__func__, 1000); 7110 7111 ha->state = QLNX_STATE_CLOSED; 7112 7113 QL_DPRINT2(ha, "exit\n"); 7114 return; 7115 } 7116 7117 static int 7118 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7119 { 7120 int rval = -1; 7121 struct ecore_hwfn *p_hwfn; 7122 struct ecore_ptt *p_ptt; 7123 7124 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7125 7126 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7127 p_ptt = ecore_ptt_acquire(p_hwfn); 7128 7129 if (!p_ptt) { 7130 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7131 return (rval); 7132 } 7133 7134 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7135 7136 if (rval == DBG_STATUS_OK) 7137 rval = 0; 7138 else { 7139 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7140 "[0x%x]\n", rval); 7141 } 7142 7143 ecore_ptt_release(p_hwfn, p_ptt); 7144 7145 return (rval); 7146 } 7147 7148 static int 7149 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7150 { 7151 int rval = -1; 7152 struct ecore_hwfn *p_hwfn; 7153 struct ecore_ptt *p_ptt; 7154 7155 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7156 7157 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7158 p_ptt = ecore_ptt_acquire(p_hwfn); 7159 7160 if (!p_ptt) { 7161 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7162 return (rval); 7163 } 7164 7165 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7166 7167 if (rval == DBG_STATUS_OK) 7168 rval = 0; 7169 else { 7170 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7171 " [0x%x]\n", rval); 7172 } 7173 7174 ecore_ptt_release(p_hwfn, p_ptt); 7175 7176 return (rval); 7177 } 7178 7179 7180 static void 7181 qlnx_sample_storm_stats(qlnx_host_t *ha) 7182 { 7183 int i, index; 7184 struct ecore_dev 
*cdev; 7185 qlnx_storm_stats_t *s_stats; 7186 uint32_t reg; 7187 struct ecore_ptt *p_ptt; 7188 struct ecore_hwfn *hwfn; 7189 7190 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7191 ha->storm_stats_gather = 0; 7192 return; 7193 } 7194 7195 cdev = &ha->cdev; 7196 7197 for_each_hwfn(cdev, i) { 7198 7199 hwfn = &cdev->hwfns[i]; 7200 7201 p_ptt = ecore_ptt_acquire(hwfn); 7202 if (!p_ptt) 7203 return; 7204 7205 index = ha->storm_stats_index + 7206 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7207 7208 s_stats = &ha->storm_stats[index]; 7209 7210 /* XSTORM */ 7211 reg = XSEM_REG_FAST_MEMORY + 7212 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7213 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7214 7215 reg = XSEM_REG_FAST_MEMORY + 7216 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7217 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7218 7219 reg = XSEM_REG_FAST_MEMORY + 7220 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7221 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7222 7223 reg = XSEM_REG_FAST_MEMORY + 7224 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7225 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7226 7227 /* YSTORM */ 7228 reg = YSEM_REG_FAST_MEMORY + 7229 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7230 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7231 7232 reg = YSEM_REG_FAST_MEMORY + 7233 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7234 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7235 7236 reg = YSEM_REG_FAST_MEMORY + 7237 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7238 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7239 7240 reg = YSEM_REG_FAST_MEMORY + 7241 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7242 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7243 7244 /* PSTORM */ 7245 reg = PSEM_REG_FAST_MEMORY + 7246 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7247 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7248 7249 reg = PSEM_REG_FAST_MEMORY + 7250 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7251 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7252 7253 reg = PSEM_REG_FAST_MEMORY + 7254 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7255 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7256 7257 reg = PSEM_REG_FAST_MEMORY + 7258 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7259 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7260 7261 /* TSTORM */ 7262 reg = TSEM_REG_FAST_MEMORY + 7263 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7264 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7265 7266 reg = TSEM_REG_FAST_MEMORY + 7267 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7268 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7269 7270 reg = TSEM_REG_FAST_MEMORY + 7271 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7272 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7273 7274 reg = TSEM_REG_FAST_MEMORY + 7275 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7276 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7277 7278 /* MSTORM */ 7279 reg = MSEM_REG_FAST_MEMORY + 7280 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7281 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7282 7283 reg = MSEM_REG_FAST_MEMORY + 7284 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7285 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7286 7287 reg = MSEM_REG_FAST_MEMORY + 7288 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7289 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7290 7291 reg = MSEM_REG_FAST_MEMORY + 7292 
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7293 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7294 7295 /* USTORM */ 7296 reg = USEM_REG_FAST_MEMORY + 7297 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7298 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7299 7300 reg = USEM_REG_FAST_MEMORY + 7301 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7302 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7303 7304 reg = USEM_REG_FAST_MEMORY + 7305 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7306 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7307 7308 reg = USEM_REG_FAST_MEMORY + 7309 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7310 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7311 7312 ecore_ptt_release(hwfn, p_ptt); 7313 } 7314 7315 ha->storm_stats_index++; 7316 7317 return; 7318 } 7319 7320 /* 7321 * Name: qlnx_dump_buf8 7322 * Function: dumps a buffer as bytes 7323 */ 7324 static void 7325 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7326 { 7327 device_t dev; 7328 uint32_t i = 0; 7329 uint8_t *buf; 7330 7331 dev = ha->pci_dev; 7332 buf = dbuf; 7333 7334 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7335 7336 while (len >= 16) { 7337 device_printf(dev,"0x%08x:" 7338 " %02x %02x %02x %02x %02x %02x %02x %02x" 7339 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7340 buf[0], buf[1], buf[2], buf[3], 7341 buf[4], buf[5], buf[6], buf[7], 7342 buf[8], buf[9], buf[10], buf[11], 7343 buf[12], buf[13], buf[14], buf[15]); 7344 i += 16; 7345 len -= 16; 7346 buf += 16; 7347 } 7348 switch (len) { 7349 case 1: 7350 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7351 break; 7352 case 2: 7353 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7354 break; 7355 case 3: 7356 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7357 i, buf[0], buf[1], buf[2]); 7358 break; 7359 case 4: 7360 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7361 buf[0], buf[1], buf[2], buf[3]); 7362 break; 7363 case 5: 7364 device_printf(dev,"0x%08x:" 7365 " %02x %02x %02x %02x %02x\n", i, 7366 buf[0], buf[1], buf[2], buf[3], buf[4]); 7367 break; 7368 case 6: 7369 device_printf(dev,"0x%08x:" 7370 " %02x %02x %02x %02x %02x %02x\n", i, 7371 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7372 break; 7373 case 7: 7374 device_printf(dev,"0x%08x:" 7375 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7376 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7377 break; 7378 case 8: 7379 device_printf(dev,"0x%08x:" 7380 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7381 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7382 buf[7]); 7383 break; 7384 case 9: 7385 device_printf(dev,"0x%08x:" 7386 " %02x %02x %02x %02x %02x %02x %02x %02x" 7387 " %02x\n", i, 7388 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7389 buf[7], buf[8]); 7390 break; 7391 case 10: 7392 device_printf(dev,"0x%08x:" 7393 " %02x %02x %02x %02x %02x %02x %02x %02x" 7394 " %02x %02x\n", i, 7395 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7396 buf[7], buf[8], buf[9]); 7397 break; 7398 case 11: 7399 device_printf(dev,"0x%08x:" 7400 " %02x %02x %02x %02x %02x %02x %02x %02x" 7401 " %02x %02x %02x\n", i, 7402 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7403 buf[7], buf[8], buf[9], buf[10]); 7404 break; 7405 case 12: 7406 device_printf(dev,"0x%08x:" 7407 " %02x %02x %02x %02x %02x %02x %02x %02x" 7408 " %02x %02x %02x %02x\n", i, 7409 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7410 buf[7], buf[8], buf[9], buf[10], 
buf[11]); 7411 break; 7412 case 13: 7413 device_printf(dev,"0x%08x:" 7414 " %02x %02x %02x %02x %02x %02x %02x %02x" 7415 " %02x %02x %02x %02x %02x\n", i, 7416 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7417 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7418 break; 7419 case 14: 7420 device_printf(dev,"0x%08x:" 7421 " %02x %02x %02x %02x %02x %02x %02x %02x" 7422 " %02x %02x %02x %02x %02x %02x\n", i, 7423 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7424 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7425 buf[13]); 7426 break; 7427 case 15: 7428 device_printf(dev,"0x%08x:" 7429 " %02x %02x %02x %02x %02x %02x %02x %02x" 7430 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7431 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7432 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7433 buf[13], buf[14]); 7434 break; 7435 default: 7436 break; 7437 } 7438 7439 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7440 7441 return; 7442 } 7443 7444
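/*
 * Illustrative (hypothetical) usage of the dump helper above, e.g. to
 * inspect the first 64 bytes of a received frame while debugging:
 *
 *     qlnx_dump_buf8(ha, "rx frame", mtod(mp, void *), 64);
 *
 * where 'mp' would be the struct mbuf of interest.
 */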