1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 29 /* 30 * File: qlnx_os.c 31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "qlnx_os.h" 38 #include "bcm_osal.h" 39 #include "reg_addr.h" 40 #include "ecore_gtt_reg_addr.h" 41 #include "ecore.h" 42 #include "ecore_chain.h" 43 #include "ecore_status.h" 44 #include "ecore_hw.h" 45 #include "ecore_rt_defs.h" 46 #include "ecore_init_ops.h" 47 #include "ecore_int.h" 48 #include "ecore_cxt.h" 49 #include "ecore_spq.h" 50 #include "ecore_init_fw_funcs.h" 51 #include "ecore_sp_commands.h" 52 #include "ecore_dev_api.h" 53 #include "ecore_l2_api.h" 54 #include "ecore_mcp.h" 55 #include "ecore_hw_defs.h" 56 #include "mcp_public.h" 57 #include "ecore_iro.h" 58 #include "nvm_cfg.h" 59 #include "ecore_dev_api.h" 60 #include "ecore_dbg_fw_funcs.h" 61 #include "ecore_iov_api.h" 62 #include "ecore_vf_api.h" 63 64 #include "qlnx_ioctl.h" 65 #include "qlnx_def.h" 66 #include "qlnx_ver.h" 67 68 #ifdef QLNX_ENABLE_IWARP 69 #include "qlnx_rdma.h" 70 #endif /* #ifdef QLNX_ENABLE_IWARP */ 71 72 #include <sys/smp.h> 73 74 75 /* 76 * static functions 77 */ 78 /* 79 * ioctl related functions 80 */ 81 static void qlnx_add_sysctls(qlnx_host_t *ha); 82 83 /* 84 * main driver 85 */ 86 static void qlnx_release(qlnx_host_t *ha); 87 static void qlnx_fp_isr(void *arg); 88 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 89 static void qlnx_init(void *arg); 90 static void qlnx_init_locked(qlnx_host_t *ha); 91 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 92 static int qlnx_set_promisc(qlnx_host_t *ha); 93 static int qlnx_set_allmulti(qlnx_host_t *ha); 94 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 95 static int qlnx_media_change(struct ifnet *ifp); 96 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 97 static void qlnx_stop(qlnx_host_t *ha); 98 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 99 struct mbuf **m_headp); 100 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 101 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 102 struct 
qlnx_link_output *if_link); 103 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp); 104 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, 105 struct mbuf *mp); 106 static void qlnx_qflush(struct ifnet *ifp); 107 108 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 109 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 110 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 111 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 112 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 113 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 114 115 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 116 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 117 118 static int qlnx_nic_setup(struct ecore_dev *cdev, 119 struct ecore_pf_params *func_params); 120 static int qlnx_nic_start(struct ecore_dev *cdev); 121 static int qlnx_slowpath_start(qlnx_host_t *ha); 122 static int qlnx_slowpath_stop(qlnx_host_t *ha); 123 static int qlnx_init_hw(qlnx_host_t *ha); 124 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 125 char ver_str[VER_SIZE]); 126 static void qlnx_unload(qlnx_host_t *ha); 127 static int qlnx_load(qlnx_host_t *ha); 128 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 129 uint32_t add_mac); 130 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 131 uint32_t len); 132 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 133 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 134 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 135 struct qlnx_rx_queue *rxq); 136 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 137 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 138 int hwfn_index); 139 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 140 int hwfn_index); 141 static void qlnx_timer(void *arg); 142 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 143 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 144 static void qlnx_trigger_dump(qlnx_host_t *ha); 145 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 146 struct qlnx_tx_queue *txq); 147 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 148 struct qlnx_tx_queue *txq); 149 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 150 int lro_enable); 151 static void qlnx_fp_taskqueue(void *context, int pending); 152 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 153 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 154 struct qlnx_agg_info *tpa); 155 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 156 157 #if __FreeBSD_version >= 1100000 158 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 159 #endif 160 161 162 /* 163 * Hooks to the Operating Systems 164 */ 165 static int qlnx_pci_probe (device_t); 166 static int qlnx_pci_attach (device_t); 167 static int qlnx_pci_detach (device_t); 168 169 #ifndef QLNX_VF 170 171 #ifdef CONFIG_ECORE_SRIOV 172 173 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 174 static void qlnx_iov_uninit(device_t dev); 175 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 176 static void qlnx_initialize_sriov(qlnx_host_t *ha); 177 static void qlnx_pf_taskqueue(void *context, int pending); 178 static int 
qlnx_create_pf_taskqueues(qlnx_host_t *ha); 179 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha); 180 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha); 181 182 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 183 184 static device_method_t qlnx_pci_methods[] = { 185 /* Device interface */ 186 DEVMETHOD(device_probe, qlnx_pci_probe), 187 DEVMETHOD(device_attach, qlnx_pci_attach), 188 DEVMETHOD(device_detach, qlnx_pci_detach), 189 190 #ifdef CONFIG_ECORE_SRIOV 191 DEVMETHOD(pci_iov_init, qlnx_iov_init), 192 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit), 193 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf), 194 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 195 { 0, 0 } 196 }; 197 198 static driver_t qlnx_pci_driver = { 199 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 200 }; 201 202 static devclass_t qlnx_devclass; 203 204 MODULE_VERSION(if_qlnxe,1); 205 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0); 206 207 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 208 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 209 210 #else 211 212 static device_method_t qlnxv_pci_methods[] = { 213 /* Device interface */ 214 DEVMETHOD(device_probe, qlnx_pci_probe), 215 DEVMETHOD(device_attach, qlnx_pci_attach), 216 DEVMETHOD(device_detach, qlnx_pci_detach), 217 { 0, 0 } 218 }; 219 220 static driver_t qlnxv_pci_driver = { 221 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t), 222 }; 223 224 static devclass_t qlnxv_devclass; 225 MODULE_VERSION(if_qlnxev,1); 226 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0); 227 228 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1); 229 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1); 230 231 #endif /* #ifdef QLNX_VF */ 232 233 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 234 235 char qlnx_dev_str[128]; 236 char qlnx_ver_str[VER_SIZE]; 237 char qlnx_name_str[NAME_SIZE]; 238 239 /* 240 * Some PCI Configuration Space Related Defines 241 */ 242 243 #ifndef PCI_VENDOR_QLOGIC 244 #define PCI_VENDOR_QLOGIC 0x1077 245 #endif 246 247 /* 40G Adapter QLE45xxx*/ 248 #ifndef QLOGIC_PCI_DEVICE_ID_1634 249 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 250 #endif 251 252 /* 100G Adapter QLE45xxx*/ 253 #ifndef QLOGIC_PCI_DEVICE_ID_1644 254 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 255 #endif 256 257 /* 25G Adapter QLE45xxx*/ 258 #ifndef QLOGIC_PCI_DEVICE_ID_1656 259 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 260 #endif 261 262 /* 50G Adapter QLE45xxx*/ 263 #ifndef QLOGIC_PCI_DEVICE_ID_1654 264 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 265 #endif 266 267 /* 10G/25G/40G Adapter QLE41xxx*/ 268 #ifndef QLOGIC_PCI_DEVICE_ID_8070 269 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 270 #endif 271 272 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/ 273 #ifndef QLOGIC_PCI_DEVICE_ID_8090 274 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090 275 #endif 276 277 278 279 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters"); 280 281 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ 282 static int qlnxe_queue_count = QLNX_DEFAULT_RSS; 283 284 #if __FreeBSD_version < 1100000 285 286 TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count); 287 288 #endif 289 290 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 291 &qlnxe_queue_count, 0, "Multi-Queue queue count"); 292 293 294 /* 295 * Note on RDMA personality setting 296 * 297 * Read the personality configured in NVRAM 298 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and 299 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT 300 * use the personality in NVRAM. 
 *
 * Otherwise use the personality configured in sysctl.
 *
 */
#define QLNX_PERSONALITY_DEFAULT	0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALIY_MASK		0xF

/* RDMA configuration; 64bit field allows setting for 16 physical functions*/
static uint64_t qlnxe_rdma_configuration = 0x22222222;

#if __FreeBSD_version < 1100000

TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);

SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
        &qlnxe_rdma_configuration, 0, "RDMA Configuration");

#else

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
        &qlnxe_rdma_configuration, 0, "RDMA Configuration");

#endif /* #if __FreeBSD_version < 1100000 */

int
qlnx_vf_device(qlnx_host_t *ha)
{
        uint16_t device_id;

        device_id = ha->device_id;

        if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
                return 0;

        return -1;
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
        uint16_t device_id;

        device_id = ha->device_id;

#ifndef QLNX_VF
        if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_8070))
                return 0;
#else
        if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
                return 0;

#endif /* #ifndef QLNX_VF */
        return -1;
}

#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
        uint16_t device_id;

        device_id = pci_get_device(ha->pci_dev);

        if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
            (device_id == QLOGIC_PCI_DEVICE_ID_8070))
                return (0);

        return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */

/*
 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLE45xxx/QLE41xxx device
 */
static int
qlnx_pci_probe(device_t dev)
{
        snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
                QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
        snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

        if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
                return (ENXIO);
        }

        switch (pci_get_device(dev)) {

#ifndef QLNX_VF

        case QLOGIC_PCI_DEVICE_ID_1644:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
                device_set_desc_copy(dev, qlnx_dev_str);

                break;

        case QLOGIC_PCI_DEVICE_ID_1634:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
                device_set_desc_copy(dev, qlnx_dev_str);

                break;

        case QLOGIC_PCI_DEVICE_ID_1656:
                snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
                        "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
                        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
                        QLNX_VERSION_BUILD);
device_set_desc_copy(dev, qlnx_dev_str); 425 426 break; 427 428 case QLOGIC_PCI_DEVICE_ID_1654: 429 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 430 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 431 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 432 QLNX_VERSION_BUILD); 433 device_set_desc_copy(dev, qlnx_dev_str); 434 435 break; 436 437 case QLOGIC_PCI_DEVICE_ID_8070: 438 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 439 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 440 " Adapter-Ethernet Function", 441 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 442 QLNX_VERSION_BUILD); 443 device_set_desc_copy(dev, qlnx_dev_str); 444 445 break; 446 447 #else 448 case QLOGIC_PCI_DEVICE_ID_8090: 449 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 450 "Qlogic SRIOV PCI CNA (AH) " 451 "Adapter-Ethernet Function", 452 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 453 QLNX_VERSION_BUILD); 454 device_set_desc_copy(dev, qlnx_dev_str); 455 456 break; 457 458 #endif /* #ifndef QLNX_VF */ 459 460 default: 461 return (ENXIO); 462 } 463 464 #ifdef QLNX_ENABLE_IWARP 465 qlnx_rdma_init(); 466 #endif /* #ifdef QLNX_ENABLE_IWARP */ 467 468 return (BUS_PROBE_DEFAULT); 469 } 470 471 static uint16_t 472 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 473 struct qlnx_tx_queue *txq) 474 { 475 u16 hw_bd_cons; 476 u16 ecore_cons_idx; 477 uint16_t diff; 478 479 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 480 481 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 482 if (hw_bd_cons < ecore_cons_idx) { 483 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 484 } else { 485 diff = hw_bd_cons - ecore_cons_idx; 486 } 487 return diff; 488 } 489 490 491 static void 492 qlnx_sp_intr(void *arg) 493 { 494 struct ecore_hwfn *p_hwfn; 495 qlnx_host_t *ha; 496 int i; 497 498 p_hwfn = arg; 499 500 if (p_hwfn == NULL) { 501 printf("%s: spurious slowpath intr\n", __func__); 502 return; 503 } 504 505 ha = (qlnx_host_t *)p_hwfn->p_dev; 506 507 QL_DPRINT2(ha, "enter\n"); 508 509 for (i = 0; i < ha->cdev.num_hwfns; i++) { 510 if (&ha->cdev.hwfns[i] == p_hwfn) { 511 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 512 break; 513 } 514 } 515 QL_DPRINT2(ha, "exit\n"); 516 517 return; 518 } 519 520 static void 521 qlnx_sp_taskqueue(void *context, int pending) 522 { 523 struct ecore_hwfn *p_hwfn; 524 525 p_hwfn = context; 526 527 if (p_hwfn != NULL) { 528 qlnx_sp_isr(p_hwfn); 529 } 530 return; 531 } 532 533 static int 534 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 535 { 536 int i; 537 uint8_t tq_name[32]; 538 539 for (i = 0; i < ha->cdev.num_hwfns; i++) { 540 541 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 542 543 bzero(tq_name, sizeof (tq_name)); 544 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 545 546 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 547 548 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 549 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 550 551 if (ha->sp_taskqueue[i] == NULL) 552 return (-1); 553 554 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 555 tq_name); 556 557 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 558 } 559 560 return (0); 561 } 562 563 static void 564 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 565 { 566 int i; 567 568 for (i = 0; i < ha->cdev.num_hwfns; i++) { 569 if (ha->sp_taskqueue[i] != NULL) { 570 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 571 taskqueue_free(ha->sp_taskqueue[i]); 572 } 573 } 574 return; 575 } 576 577 static void 578 qlnx_fp_taskqueue(void *context, int pending) 579 { 580 struct 
qlnx_fastpath *fp; 581 qlnx_host_t *ha; 582 struct ifnet *ifp; 583 584 fp = context; 585 586 if (fp == NULL) 587 return; 588 589 ha = (qlnx_host_t *)fp->edev; 590 591 ifp = ha->ifp; 592 593 if(ifp->if_drv_flags & IFF_DRV_RUNNING) { 594 595 if (!drbr_empty(ifp, fp->tx_br)) { 596 597 if(mtx_trylock(&fp->tx_mtx)) { 598 599 #ifdef QLNX_TRACE_PERF_DATA 600 tx_pkts = fp->tx_pkts_transmitted; 601 tx_compl = fp->tx_pkts_completed; 602 #endif 603 604 qlnx_transmit_locked(ifp, fp, NULL); 605 606 #ifdef QLNX_TRACE_PERF_DATA 607 fp->tx_pkts_trans_fp += 608 (fp->tx_pkts_transmitted - tx_pkts); 609 fp->tx_pkts_compl_fp += 610 (fp->tx_pkts_completed - tx_compl); 611 #endif 612 mtx_unlock(&fp->tx_mtx); 613 } 614 } 615 } 616 617 QL_DPRINT2(ha, "exit \n"); 618 return; 619 } 620 621 static int 622 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 623 { 624 int i; 625 uint8_t tq_name[32]; 626 struct qlnx_fastpath *fp; 627 628 for (i = 0; i < ha->num_rss; i++) { 629 630 fp = &ha->fp_array[i]; 631 632 bzero(tq_name, sizeof (tq_name)); 633 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 634 635 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 636 637 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 638 taskqueue_thread_enqueue, 639 &fp->fp_taskqueue); 640 641 if (fp->fp_taskqueue == NULL) 642 return (-1); 643 644 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 645 tq_name); 646 647 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 648 } 649 650 return (0); 651 } 652 653 static void 654 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 655 { 656 int i; 657 struct qlnx_fastpath *fp; 658 659 for (i = 0; i < ha->num_rss; i++) { 660 661 fp = &ha->fp_array[i]; 662 663 if (fp->fp_taskqueue != NULL) { 664 665 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 666 taskqueue_free(fp->fp_taskqueue); 667 fp->fp_taskqueue = NULL; 668 } 669 } 670 return; 671 } 672 673 static void 674 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 675 { 676 int i; 677 struct qlnx_fastpath *fp; 678 679 for (i = 0; i < ha->num_rss; i++) { 680 fp = &ha->fp_array[i]; 681 682 if (fp->fp_taskqueue != NULL) { 683 QLNX_UNLOCK(ha); 684 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 685 QLNX_LOCK(ha); 686 } 687 } 688 return; 689 } 690 691 static void 692 qlnx_get_params(qlnx_host_t *ha) 693 { 694 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 695 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 696 qlnxe_queue_count); 697 qlnxe_queue_count = 0; 698 } 699 return; 700 } 701 702 static void 703 qlnx_error_recovery_taskqueue(void *context, int pending) 704 { 705 qlnx_host_t *ha; 706 707 ha = context; 708 709 QL_DPRINT2(ha, "enter\n"); 710 711 QLNX_LOCK(ha); 712 qlnx_stop(ha); 713 QLNX_UNLOCK(ha); 714 715 #ifdef QLNX_ENABLE_IWARP 716 qlnx_rdma_dev_remove(ha); 717 #endif /* #ifdef QLNX_ENABLE_IWARP */ 718 719 qlnx_slowpath_stop(ha); 720 qlnx_slowpath_start(ha); 721 722 #ifdef QLNX_ENABLE_IWARP 723 qlnx_rdma_dev_add(ha); 724 #endif /* #ifdef QLNX_ENABLE_IWARP */ 725 726 qlnx_init(ha); 727 728 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 729 730 QL_DPRINT2(ha, "exit\n"); 731 732 return; 733 } 734 735 static int 736 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 737 { 738 uint8_t tq_name[32]; 739 740 bzero(tq_name, sizeof (tq_name)); 741 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 742 743 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 744 745 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 746 taskqueue_thread_enqueue, &ha->err_taskqueue); 747 748 749 if (ha->err_taskqueue == 
NULL) 750 return (-1); 751 752 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 753 754 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 755 756 return (0); 757 } 758 759 static void 760 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 761 { 762 if (ha->err_taskqueue != NULL) { 763 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 764 taskqueue_free(ha->err_taskqueue); 765 } 766 767 ha->err_taskqueue = NULL; 768 769 return; 770 } 771 772 /* 773 * Name: qlnx_pci_attach 774 * Function: attaches the device to the operating system 775 */ 776 static int 777 qlnx_pci_attach(device_t dev) 778 { 779 qlnx_host_t *ha = NULL; 780 uint32_t rsrc_len_reg = 0; 781 uint32_t rsrc_len_dbells = 0; 782 uint32_t rsrc_len_msix = 0; 783 int i; 784 uint32_t mfw_ver; 785 uint32_t num_sp_msix = 0; 786 uint32_t num_rdma_irqs = 0; 787 788 if ((ha = device_get_softc(dev)) == NULL) { 789 device_printf(dev, "cannot get softc\n"); 790 return (ENOMEM); 791 } 792 793 memset(ha, 0, sizeof (qlnx_host_t)); 794 795 ha->device_id = pci_get_device(dev); 796 797 if (qlnx_valid_device(ha) != 0) { 798 device_printf(dev, "device is not valid device\n"); 799 return (ENXIO); 800 } 801 ha->pci_func = pci_get_function(dev); 802 803 ha->pci_dev = dev; 804 805 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 806 807 ha->flags.lock_init = 1; 808 809 pci_enable_busmaster(dev); 810 811 /* 812 * map the PCI BARs 813 */ 814 815 ha->reg_rid = PCIR_BAR(0); 816 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 817 RF_ACTIVE); 818 819 if (ha->pci_reg == NULL) { 820 device_printf(dev, "unable to map BAR0\n"); 821 goto qlnx_pci_attach_err; 822 } 823 824 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 825 ha->reg_rid); 826 827 ha->dbells_rid = PCIR_BAR(2); 828 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, 829 SYS_RES_MEMORY, 830 ha->dbells_rid); 831 if (rsrc_len_dbells) { 832 833 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 834 &ha->dbells_rid, RF_ACTIVE); 835 836 if (ha->pci_dbells == NULL) { 837 device_printf(dev, "unable to map BAR1\n"); 838 goto qlnx_pci_attach_err; 839 } 840 ha->dbells_phys_addr = (uint64_t) 841 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 842 843 ha->dbells_size = rsrc_len_dbells; 844 } else { 845 if (qlnx_vf_device(ha) != 0) { 846 device_printf(dev, " BAR1 size is zero\n"); 847 goto qlnx_pci_attach_err; 848 } 849 } 850 851 ha->msix_rid = PCIR_BAR(4); 852 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 853 &ha->msix_rid, RF_ACTIVE); 854 855 if (ha->msix_bar == NULL) { 856 device_printf(dev, "unable to map BAR2\n"); 857 goto qlnx_pci_attach_err; 858 } 859 860 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 861 ha->msix_rid); 862 863 ha->dbg_level = 0x0000; 864 865 QL_DPRINT1(ha, "\n\t\t\t" 866 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 867 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 868 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 869 " msix_avail = 0x%x " 870 "\n\t\t\t[ncpus = %d]\n", 871 ha->pci_dev, ha->pci_reg, rsrc_len_reg, 872 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 873 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 874 mp_ncpus); 875 /* 876 * allocate dma tags 877 */ 878 879 if (qlnx_alloc_parent_dma_tag(ha)) 880 goto qlnx_pci_attach_err; 881 882 if (qlnx_alloc_tx_dma_tag(ha)) 883 goto qlnx_pci_attach_err; 884 885 if (qlnx_alloc_rx_dma_tag(ha)) 886 goto qlnx_pci_attach_err; 
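        /*
         * Note on the DMA tag helpers called above (a sketch of the usual
         * newbus pattern, not this driver's implementation, which lives
         * further down in this file): the parent tag is typically created
         * with bus_dma_tag_create() against bus_get_dma_tag(dev), and the
         * tx/rx tags are then derived from it, e.g. (field name assumed):
         *
         *      if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0,
         *              BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
         *              BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT,
         *              0, NULL, NULL, &ha->parent_tag))
         *                      goto qlnx_pci_attach_err;
         */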
887 888 889 if (qlnx_init_hw(ha) != 0) 890 goto qlnx_pci_attach_err; 891 892 ha->flags.hw_init = 1; 893 894 qlnx_get_params(ha); 895 896 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && 897 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { 898 qlnxe_queue_count = QLNX_MAX_RSS; 899 } 900 901 /* 902 * Allocate MSI-x vectors 903 */ 904 if (qlnx_vf_device(ha) != 0) { 905 906 if (qlnxe_queue_count == 0) 907 ha->num_rss = QLNX_DEFAULT_RSS; 908 else 909 ha->num_rss = qlnxe_queue_count; 910 911 num_sp_msix = ha->cdev.num_hwfns; 912 } else { 913 uint8_t max_rxq; 914 uint8_t max_txq; 915 916 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 917 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 918 919 if (max_rxq < max_txq) 920 ha->num_rss = max_rxq; 921 else 922 ha->num_rss = max_txq; 923 924 if (ha->num_rss > QLNX_MAX_VF_RSS) 925 ha->num_rss = QLNX_MAX_VF_RSS; 926 927 num_sp_msix = 0; 928 } 929 930 if (ha->num_rss > mp_ncpus) 931 ha->num_rss = mp_ncpus; 932 933 ha->num_tc = QLNX_MAX_TC; 934 935 ha->msix_count = pci_msix_count(dev); 936 937 #ifdef QLNX_ENABLE_IWARP 938 939 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 940 941 #endif /* #ifdef QLNX_ENABLE_IWARP */ 942 943 if (!ha->msix_count || 944 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 945 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 946 ha->msix_count); 947 goto qlnx_pci_attach_err; 948 } 949 950 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 951 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 952 else 953 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 954 955 QL_DPRINT1(ha, "\n\t\t\t" 956 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 957 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 958 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 959 " msix_avail = 0x%x msix_alloc = 0x%x" 960 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 961 ha->pci_reg, rsrc_len_reg, 962 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 963 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 964 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 965 966 if (pci_alloc_msix(dev, &ha->msix_count)) { 967 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 968 ha->msix_count); 969 ha->msix_count = 0; 970 goto qlnx_pci_attach_err; 971 } 972 973 /* 974 * Initialize slow path interrupt and task queue 975 */ 976 977 if (num_sp_msix) { 978 979 if (qlnx_create_sp_taskqueues(ha) != 0) 980 goto qlnx_pci_attach_err; 981 982 for (i = 0; i < ha->cdev.num_hwfns; i++) { 983 984 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 985 986 ha->sp_irq_rid[i] = i + 1; 987 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 988 &ha->sp_irq_rid[i], 989 (RF_ACTIVE | RF_SHAREABLE)); 990 if (ha->sp_irq[i] == NULL) { 991 device_printf(dev, 992 "could not allocate mbx interrupt\n"); 993 goto qlnx_pci_attach_err; 994 } 995 996 if (bus_setup_intr(dev, ha->sp_irq[i], 997 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 998 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 999 device_printf(dev, 1000 "could not setup slow path interrupt\n"); 1001 goto qlnx_pci_attach_err; 1002 } 1003 1004 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 1005 " sp_irq %p sp_handle %p\n", p_hwfn, 1006 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 1007 } 1008 } 1009 1010 /* 1011 * initialize fast path interrupt 1012 */ 1013 if (qlnx_create_fp_taskqueues(ha) != 0) 1014 goto qlnx_pci_attach_err; 1015 1016 for (i = 0; i < ha->num_rss; i++) { 1017 ha->irq_vec[i].rss_idx = i; 1018 
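                /*
                 * MSI-X resource-id layout (from the allocations above): the
                 * slowpath vectors use rids 1..num_sp_msix (one per hwfn on a
                 * PF; num_sp_msix is 0 on a VF), so fastpath queue i is given
                 * rid (1 + num_sp_msix) + i below.
                 */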
ha->irq_vec[i].ha = ha; 1019 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; 1020 1021 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1022 &ha->irq_vec[i].irq_rid, 1023 (RF_ACTIVE | RF_SHAREABLE)); 1024 1025 if (ha->irq_vec[i].irq == NULL) { 1026 device_printf(dev, 1027 "could not allocate interrupt[%d] irq_rid = %d\n", 1028 i, ha->irq_vec[i].irq_rid); 1029 goto qlnx_pci_attach_err; 1030 } 1031 1032 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 1033 device_printf(dev, "could not allocate tx_br[%d]\n", i); 1034 goto qlnx_pci_attach_err; 1035 1036 } 1037 } 1038 1039 1040 if (qlnx_vf_device(ha) != 0) { 1041 1042 callout_init(&ha->qlnx_callout, 1); 1043 ha->flags.callout_init = 1; 1044 1045 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1046 1047 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1048 goto qlnx_pci_attach_err; 1049 if (ha->grcdump_size[i] == 0) 1050 goto qlnx_pci_attach_err; 1051 1052 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1053 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1054 i, ha->grcdump_size[i]); 1055 1056 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1057 if (ha->grcdump[i] == NULL) { 1058 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1059 goto qlnx_pci_attach_err; 1060 } 1061 1062 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1063 goto qlnx_pci_attach_err; 1064 if (ha->idle_chk_size[i] == 0) 1065 goto qlnx_pci_attach_err; 1066 1067 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1068 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1069 i, ha->idle_chk_size[i]); 1070 1071 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1072 1073 if (ha->idle_chk[i] == NULL) { 1074 device_printf(dev, "idle_chk alloc failed\n"); 1075 goto qlnx_pci_attach_err; 1076 } 1077 } 1078 1079 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1080 goto qlnx_pci_attach_err; 1081 } 1082 1083 if (qlnx_slowpath_start(ha) != 0) 1084 goto qlnx_pci_attach_err; 1085 else 1086 ha->flags.slowpath_start = 1; 1087 1088 if (qlnx_vf_device(ha) != 0) { 1089 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1090 qlnx_mdelay(__func__, 1000); 1091 qlnx_trigger_dump(ha); 1092 1093 goto qlnx_pci_attach_err0; 1094 } 1095 1096 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1097 qlnx_mdelay(__func__, 1000); 1098 qlnx_trigger_dump(ha); 1099 1100 goto qlnx_pci_attach_err0; 1101 } 1102 } else { 1103 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1104 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1105 } 1106 1107 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1108 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1109 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1110 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1111 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1112 FW_ENGINEERING_VERSION); 1113 1114 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1115 ha->stormfw_ver, ha->mfw_ver); 1116 1117 qlnx_init_ifnet(dev, ha); 1118 1119 /* 1120 * add sysctls 1121 */ 1122 qlnx_add_sysctls(ha); 1123 1124 qlnx_pci_attach_err0: 1125 /* 1126 * create ioctl device interface 1127 */ 1128 if (qlnx_vf_device(ha) != 0) { 1129 1130 if (qlnx_make_cdev(ha)) { 1131 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1132 goto qlnx_pci_attach_err; 1133 } 1134 1135 #ifdef QLNX_ENABLE_IWARP 1136 qlnx_rdma_dev_add(ha); 1137 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1138 } 1139 1140 #ifndef QLNX_VF 1141 #ifdef CONFIG_ECORE_SRIOV 1142 1143 if (qlnx_vf_device(ha) != 0) 1144 qlnx_initialize_sriov(ha); 1145 1146 #endif /* #ifdef 
CONFIG_ECORE_SRIOV */ 1147 #endif /* #ifdef QLNX_VF */ 1148 1149 QL_DPRINT2(ha, "success\n"); 1150 1151 return (0); 1152 1153 qlnx_pci_attach_err: 1154 1155 qlnx_release(ha); 1156 1157 return (ENXIO); 1158 } 1159 1160 /* 1161 * Name: qlnx_pci_detach 1162 * Function: Unhooks the device from the operating system 1163 */ 1164 static int 1165 qlnx_pci_detach(device_t dev) 1166 { 1167 qlnx_host_t *ha = NULL; 1168 1169 if ((ha = device_get_softc(dev)) == NULL) { 1170 device_printf(dev, "%s: cannot get softc\n", __func__); 1171 return (ENOMEM); 1172 } 1173 1174 if (qlnx_vf_device(ha) != 0) { 1175 #ifdef CONFIG_ECORE_SRIOV 1176 int ret; 1177 1178 ret = pci_iov_detach(dev); 1179 if (ret) { 1180 device_printf(dev, "%s: SRIOV in use\n", __func__); 1181 return (ret); 1182 } 1183 1184 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1185 1186 #ifdef QLNX_ENABLE_IWARP 1187 if (qlnx_rdma_dev_remove(ha) != 0) 1188 return (EBUSY); 1189 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1190 } 1191 1192 QLNX_LOCK(ha); 1193 qlnx_stop(ha); 1194 QLNX_UNLOCK(ha); 1195 1196 qlnx_release(ha); 1197 1198 return (0); 1199 } 1200 1201 #ifdef QLNX_ENABLE_IWARP 1202 1203 static uint8_t 1204 qlnx_get_personality(uint8_t pci_func) 1205 { 1206 uint8_t personality; 1207 1208 personality = (qlnxe_rdma_configuration >> 1209 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & 1210 QLNX_PERSONALIY_MASK; 1211 return (personality); 1212 } 1213 1214 static void 1215 qlnx_set_personality(qlnx_host_t *ha) 1216 { 1217 struct ecore_hwfn *p_hwfn; 1218 uint8_t personality; 1219 1220 p_hwfn = &ha->cdev.hwfns[0]; 1221 1222 personality = qlnx_get_personality(ha->pci_func); 1223 1224 switch (personality) { 1225 1226 case QLNX_PERSONALITY_DEFAULT: 1227 device_printf(ha->pci_dev, "%s: DEFAULT\n", 1228 __func__); 1229 ha->personality = ECORE_PCI_DEFAULT; 1230 break; 1231 1232 case QLNX_PERSONALITY_ETH_ONLY: 1233 device_printf(ha->pci_dev, "%s: ETH_ONLY\n", 1234 __func__); 1235 ha->personality = ECORE_PCI_ETH; 1236 break; 1237 1238 case QLNX_PERSONALITY_ETH_IWARP: 1239 device_printf(ha->pci_dev, "%s: ETH_IWARP\n", 1240 __func__); 1241 ha->personality = ECORE_PCI_ETH_IWARP; 1242 break; 1243 1244 case QLNX_PERSONALITY_ETH_ROCE: 1245 device_printf(ha->pci_dev, "%s: ETH_ROCE\n", 1246 __func__); 1247 ha->personality = ECORE_PCI_ETH_ROCE; 1248 break; 1249 } 1250 1251 return; 1252 } 1253 1254 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1255 1256 static int 1257 qlnx_init_hw(qlnx_host_t *ha) 1258 { 1259 int rval = 0; 1260 struct ecore_hw_prepare_params params; 1261 1262 ecore_init_struct(&ha->cdev); 1263 1264 /* ha->dp_module = ECORE_MSG_PROBE | 1265 ECORE_MSG_INTR | 1266 ECORE_MSG_SP | 1267 ECORE_MSG_LINK | 1268 ECORE_MSG_SPQ | 1269 ECORE_MSG_RDMA; 1270 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 1271 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; 1272 ha->dp_level = ECORE_LEVEL_NOTICE; 1273 //ha->dp_level = ECORE_LEVEL_VERBOSE; 1274 1275 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 1276 1277 ha->cdev.regview = ha->pci_reg; 1278 1279 ha->personality = ECORE_PCI_DEFAULT; 1280 1281 if (qlnx_vf_device(ha) == 0) { 1282 ha->cdev.b_is_vf = true; 1283 1284 if (ha->pci_dbells != NULL) { 1285 ha->cdev.doorbells = ha->pci_dbells; 1286 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1287 ha->cdev.db_size = ha->dbells_size; 1288 } else { 1289 ha->pci_dbells = ha->pci_reg; 1290 } 1291 } else { 1292 ha->cdev.doorbells = ha->pci_dbells; 1293 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1294 ha->cdev.db_size = ha->dbells_size; 1295 1296 #ifdef QLNX_ENABLE_IWARP 
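        /*
         * qlnx_set_personality() below selects the per-function override from
         * the hw.qlnxe.rdma_configuration tunable: PCI function n owns nibble
         * n, i.e. (qlnxe_rdma_configuration >>
         * (n * QLNX_PERSONALITY_BITS_PER_FUNC)) & QLNX_PERSONALIY_MASK.
         * Worked example (illustration only): with the default 0x22222222,
         * function 2 reads (0x22222222 >> 8) & 0xF = 0x2, i.e.
         * QLNX_PERSONALITY_ETH_IWARP; functions 8-15 read 0x0
         * (QLNX_PERSONALITY_DEFAULT) and keep the NVRAM personality.
         */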
1297 1298 if (qlnx_rdma_supported(ha) == 0) 1299 qlnx_set_personality(ha); 1300 1301 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1302 1303 } 1304 QL_DPRINT2(ha, "%s: %s\n", __func__, 1305 (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet")); 1306 1307 bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); 1308 1309 params.personality = ha->personality; 1310 1311 params.drv_resc_alloc = false; 1312 params.chk_reg_fifo = false; 1313 params.initiate_pf_flr = true; 1314 params.epoch = 0; 1315 1316 ecore_hw_prepare(&ha->cdev, ¶ms); 1317 1318 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 1319 1320 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n", 1321 ha, &ha->cdev, &ha->cdev.hwfns[0]); 1322 1323 return (rval); 1324 } 1325 1326 static void 1327 qlnx_release(qlnx_host_t *ha) 1328 { 1329 device_t dev; 1330 int i; 1331 1332 dev = ha->pci_dev; 1333 1334 QL_DPRINT2(ha, "enter\n"); 1335 1336 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 1337 if (ha->idle_chk[i] != NULL) { 1338 free(ha->idle_chk[i], M_QLNXBUF); 1339 ha->idle_chk[i] = NULL; 1340 } 1341 1342 if (ha->grcdump[i] != NULL) { 1343 free(ha->grcdump[i], M_QLNXBUF); 1344 ha->grcdump[i] = NULL; 1345 } 1346 } 1347 1348 if (ha->flags.callout_init) 1349 callout_drain(&ha->qlnx_callout); 1350 1351 if (ha->flags.slowpath_start) { 1352 qlnx_slowpath_stop(ha); 1353 } 1354 1355 if (ha->flags.hw_init) 1356 ecore_hw_remove(&ha->cdev); 1357 1358 qlnx_del_cdev(ha); 1359 1360 if (ha->ifp != NULL) 1361 ether_ifdetach(ha->ifp); 1362 1363 qlnx_free_tx_dma_tag(ha); 1364 1365 qlnx_free_rx_dma_tag(ha); 1366 1367 qlnx_free_parent_dma_tag(ha); 1368 1369 if (qlnx_vf_device(ha) != 0) { 1370 qlnx_destroy_error_recovery_taskqueue(ha); 1371 } 1372 1373 for (i = 0; i < ha->num_rss; i++) { 1374 struct qlnx_fastpath *fp = &ha->fp_array[i]; 1375 1376 if (ha->irq_vec[i].handle) { 1377 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 1378 ha->irq_vec[i].handle); 1379 } 1380 1381 if (ha->irq_vec[i].irq) { 1382 (void)bus_release_resource(dev, SYS_RES_IRQ, 1383 ha->irq_vec[i].irq_rid, 1384 ha->irq_vec[i].irq); 1385 } 1386 1387 qlnx_free_tx_br(ha, fp); 1388 } 1389 qlnx_destroy_fp_taskqueues(ha); 1390 1391 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1392 if (ha->sp_handle[i]) 1393 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1394 ha->sp_handle[i]); 1395 1396 if (ha->sp_irq[i]) 1397 (void) bus_release_resource(dev, SYS_RES_IRQ, 1398 ha->sp_irq_rid[i], ha->sp_irq[i]); 1399 } 1400 1401 qlnx_destroy_sp_taskqueues(ha); 1402 1403 if (ha->msix_count) 1404 pci_release_msi(dev); 1405 1406 if (ha->flags.lock_init) { 1407 mtx_destroy(&ha->hw_lock); 1408 } 1409 1410 if (ha->pci_reg) 1411 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1412 ha->pci_reg); 1413 1414 if (ha->dbells_size && ha->pci_dbells) 1415 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1416 ha->pci_dbells); 1417 1418 if (ha->msix_bar) 1419 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1420 ha->msix_bar); 1421 1422 QL_DPRINT2(ha, "exit\n"); 1423 return; 1424 } 1425 1426 static void 1427 qlnx_trigger_dump(qlnx_host_t *ha) 1428 { 1429 int i; 1430 1431 if (ha->ifp != NULL) 1432 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1433 1434 QL_DPRINT2(ha, "enter\n"); 1435 1436 if (qlnx_vf_device(ha) == 0) 1437 return; 1438 1439 ha->error_recovery = 1; 1440 1441 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1442 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1443 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1444 } 1445 1446 QL_DPRINT2(ha, "exit\n"); 1447 1448 return; 1449 } 
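/*
 * The sysctl handler below follows the standard FreeBSD write-to-trigger
 * pattern: sysctl_handle_int() copies the user value in and out, req->newptr
 * is NULL on a plain read, and writing 1 invokes qlnx_trigger_dump().
 * Userland usage sketch, assuming the OID is attached under the device's
 * sysctl tree elsewhere in this file (the leaf name here is an assumption,
 * not taken from this listing):
 *
 *      # sysctl dev.ql.0.trigger_dump=1
 */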
1450 1451 static int 1452 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1453 { 1454 int err, ret = 0; 1455 qlnx_host_t *ha; 1456 1457 err = sysctl_handle_int(oidp, &ret, 0, req); 1458 1459 if (err || !req->newptr) 1460 return (err); 1461 1462 if (ret == 1) { 1463 ha = (qlnx_host_t *)arg1; 1464 qlnx_trigger_dump(ha); 1465 } 1466 return (err); 1467 } 1468 1469 static int 1470 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1471 { 1472 int err, i, ret = 0, usecs = 0; 1473 qlnx_host_t *ha; 1474 struct ecore_hwfn *p_hwfn; 1475 struct qlnx_fastpath *fp; 1476 1477 err = sysctl_handle_int(oidp, &usecs, 0, req); 1478 1479 if (err || !req->newptr || !usecs || (usecs > 255)) 1480 return (err); 1481 1482 ha = (qlnx_host_t *)arg1; 1483 1484 if (qlnx_vf_device(ha) == 0) 1485 return (-1); 1486 1487 for (i = 0; i < ha->num_rss; i++) { 1488 1489 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1490 1491 fp = &ha->fp_array[i]; 1492 1493 if (fp->txq[0]->handle != NULL) { 1494 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1495 (uint16_t)usecs, fp->txq[0]->handle); 1496 } 1497 } 1498 1499 if (!ret) 1500 ha->tx_coalesce_usecs = (uint8_t)usecs; 1501 1502 return (err); 1503 } 1504 1505 static int 1506 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1507 { 1508 int err, i, ret = 0, usecs = 0; 1509 qlnx_host_t *ha; 1510 struct ecore_hwfn *p_hwfn; 1511 struct qlnx_fastpath *fp; 1512 1513 err = sysctl_handle_int(oidp, &usecs, 0, req); 1514 1515 if (err || !req->newptr || !usecs || (usecs > 255)) 1516 return (err); 1517 1518 ha = (qlnx_host_t *)arg1; 1519 1520 if (qlnx_vf_device(ha) == 0) 1521 return (-1); 1522 1523 for (i = 0; i < ha->num_rss; i++) { 1524 1525 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1526 1527 fp = &ha->fp_array[i]; 1528 1529 if (fp->rxq->handle != NULL) { 1530 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1531 0, fp->rxq->handle); 1532 } 1533 } 1534 1535 if (!ret) 1536 ha->rx_coalesce_usecs = (uint8_t)usecs; 1537 1538 return (err); 1539 } 1540 1541 static void 1542 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1543 { 1544 struct sysctl_ctx_list *ctx; 1545 struct sysctl_oid_list *children; 1546 struct sysctl_oid *ctx_oid; 1547 1548 ctx = device_get_sysctl_ctx(ha->pci_dev); 1549 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1550 1551 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1552 CTLFLAG_RD, NULL, "spstat"); 1553 children = SYSCTL_CHILDREN(ctx_oid); 1554 1555 SYSCTL_ADD_QUAD(ctx, children, 1556 OID_AUTO, "sp_interrupts", 1557 CTLFLAG_RD, &ha->sp_interrupts, 1558 "No. 
of slowpath interrupts"); 1559 1560 return; 1561 } 1562 1563 static void 1564 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1565 { 1566 struct sysctl_ctx_list *ctx; 1567 struct sysctl_oid_list *children; 1568 struct sysctl_oid_list *node_children; 1569 struct sysctl_oid *ctx_oid; 1570 int i, j; 1571 uint8_t name_str[16]; 1572 1573 ctx = device_get_sysctl_ctx(ha->pci_dev); 1574 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1575 1576 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1577 CTLFLAG_RD, NULL, "fpstat"); 1578 children = SYSCTL_CHILDREN(ctx_oid); 1579 1580 for (i = 0; i < ha->num_rss; i++) { 1581 1582 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1583 snprintf(name_str, sizeof(name_str), "%d", i); 1584 1585 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1586 CTLFLAG_RD, NULL, name_str); 1587 node_children = SYSCTL_CHILDREN(ctx_oid); 1588 1589 /* Tx Related */ 1590 1591 SYSCTL_ADD_QUAD(ctx, node_children, 1592 OID_AUTO, "tx_pkts_processed", 1593 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1594 "No. of packets processed for transmission"); 1595 1596 SYSCTL_ADD_QUAD(ctx, node_children, 1597 OID_AUTO, "tx_pkts_freed", 1598 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1599 "No. of freed packets"); 1600 1601 SYSCTL_ADD_QUAD(ctx, node_children, 1602 OID_AUTO, "tx_pkts_transmitted", 1603 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1604 "No. of transmitted packets"); 1605 1606 SYSCTL_ADD_QUAD(ctx, node_children, 1607 OID_AUTO, "tx_pkts_completed", 1608 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1609 "No. of transmit completions"); 1610 1611 SYSCTL_ADD_QUAD(ctx, node_children, 1612 OID_AUTO, "tx_non_tso_pkts", 1613 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1614 "No. of non LSO transmited packets"); 1615 1616 #ifdef QLNX_TRACE_PERF_DATA 1617 1618 SYSCTL_ADD_QUAD(ctx, node_children, 1619 OID_AUTO, "tx_pkts_trans_ctx", 1620 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1621 "No. of transmitted packets in transmit context"); 1622 1623 SYSCTL_ADD_QUAD(ctx, node_children, 1624 OID_AUTO, "tx_pkts_compl_ctx", 1625 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1626 "No. of transmit completions in transmit context"); 1627 1628 SYSCTL_ADD_QUAD(ctx, node_children, 1629 OID_AUTO, "tx_pkts_trans_fp", 1630 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1631 "No. of transmitted packets in taskqueue"); 1632 1633 SYSCTL_ADD_QUAD(ctx, node_children, 1634 OID_AUTO, "tx_pkts_compl_fp", 1635 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1636 "No. of transmit completions in taskqueue"); 1637 1638 SYSCTL_ADD_QUAD(ctx, node_children, 1639 OID_AUTO, "tx_pkts_compl_intr", 1640 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1641 "No. of transmit completions in interrupt ctx"); 1642 #endif 1643 1644 SYSCTL_ADD_QUAD(ctx, node_children, 1645 OID_AUTO, "tx_tso_pkts", 1646 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1647 "No. 
of LSO transmited packets"); 1648 1649 SYSCTL_ADD_QUAD(ctx, node_children, 1650 OID_AUTO, "tx_lso_wnd_min_len", 1651 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1652 "tx_lso_wnd_min_len"); 1653 1654 SYSCTL_ADD_QUAD(ctx, node_children, 1655 OID_AUTO, "tx_defrag", 1656 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1657 "tx_defrag"); 1658 1659 SYSCTL_ADD_QUAD(ctx, node_children, 1660 OID_AUTO, "tx_nsegs_gt_elem_left", 1661 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1662 "tx_nsegs_gt_elem_left"); 1663 1664 SYSCTL_ADD_UINT(ctx, node_children, 1665 OID_AUTO, "tx_tso_max_nsegs", 1666 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1667 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1668 1669 SYSCTL_ADD_UINT(ctx, node_children, 1670 OID_AUTO, "tx_tso_min_nsegs", 1671 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1672 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1673 1674 SYSCTL_ADD_UINT(ctx, node_children, 1675 OID_AUTO, "tx_tso_max_pkt_len", 1676 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1677 ha->fp_array[i].tx_tso_max_pkt_len, 1678 "tx_tso_max_pkt_len"); 1679 1680 SYSCTL_ADD_UINT(ctx, node_children, 1681 OID_AUTO, "tx_tso_min_pkt_len", 1682 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1683 ha->fp_array[i].tx_tso_min_pkt_len, 1684 "tx_tso_min_pkt_len"); 1685 1686 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1687 1688 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1689 snprintf(name_str, sizeof(name_str), 1690 "tx_pkts_nseg_%02d", (j+1)); 1691 1692 SYSCTL_ADD_QUAD(ctx, node_children, 1693 OID_AUTO, name_str, CTLFLAG_RD, 1694 &ha->fp_array[i].tx_pkts[j], name_str); 1695 } 1696 1697 #ifdef QLNX_TRACE_PERF_DATA 1698 for (j = 0; j < 18; j++) { 1699 1700 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1701 snprintf(name_str, sizeof(name_str), 1702 "tx_pkts_hist_%02d", (j+1)); 1703 1704 SYSCTL_ADD_QUAD(ctx, node_children, 1705 OID_AUTO, name_str, CTLFLAG_RD, 1706 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1707 } 1708 for (j = 0; j < 5; j++) { 1709 1710 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1711 snprintf(name_str, sizeof(name_str), 1712 "tx_comInt_%02d", (j+1)); 1713 1714 SYSCTL_ADD_QUAD(ctx, node_children, 1715 OID_AUTO, name_str, CTLFLAG_RD, 1716 &ha->fp_array[i].tx_comInt[j], name_str); 1717 } 1718 for (j = 0; j < 18; j++) { 1719 1720 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1721 snprintf(name_str, sizeof(name_str), 1722 "tx_pkts_q_%02d", (j+1)); 1723 1724 SYSCTL_ADD_QUAD(ctx, node_children, 1725 OID_AUTO, name_str, CTLFLAG_RD, 1726 &ha->fp_array[i].tx_pkts_q[j], name_str); 1727 } 1728 #endif 1729 1730 SYSCTL_ADD_QUAD(ctx, node_children, 1731 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1732 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1733 "err_tx_nsegs_gt_elem_left"); 1734 1735 SYSCTL_ADD_QUAD(ctx, node_children, 1736 OID_AUTO, "err_tx_dmamap_create", 1737 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1738 "err_tx_dmamap_create"); 1739 1740 SYSCTL_ADD_QUAD(ctx, node_children, 1741 OID_AUTO, "err_tx_defrag_dmamap_load", 1742 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1743 "err_tx_defrag_dmamap_load"); 1744 1745 SYSCTL_ADD_QUAD(ctx, node_children, 1746 OID_AUTO, "err_tx_non_tso_max_seg", 1747 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1748 "err_tx_non_tso_max_seg"); 1749 1750 SYSCTL_ADD_QUAD(ctx, node_children, 1751 OID_AUTO, "err_tx_dmamap_load", 1752 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1753 "err_tx_dmamap_load"); 1754 1755 SYSCTL_ADD_QUAD(ctx, node_children, 1756 OID_AUTO, 
"err_tx_defrag", 1757 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1758 "err_tx_defrag"); 1759 1760 SYSCTL_ADD_QUAD(ctx, node_children, 1761 OID_AUTO, "err_tx_free_pkt_null", 1762 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1763 "err_tx_free_pkt_null"); 1764 1765 SYSCTL_ADD_QUAD(ctx, node_children, 1766 OID_AUTO, "err_tx_cons_idx_conflict", 1767 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1768 "err_tx_cons_idx_conflict"); 1769 1770 SYSCTL_ADD_QUAD(ctx, node_children, 1771 OID_AUTO, "lro_cnt_64", 1772 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1773 "lro_cnt_64"); 1774 1775 SYSCTL_ADD_QUAD(ctx, node_children, 1776 OID_AUTO, "lro_cnt_128", 1777 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1778 "lro_cnt_128"); 1779 1780 SYSCTL_ADD_QUAD(ctx, node_children, 1781 OID_AUTO, "lro_cnt_256", 1782 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1783 "lro_cnt_256"); 1784 1785 SYSCTL_ADD_QUAD(ctx, node_children, 1786 OID_AUTO, "lro_cnt_512", 1787 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1788 "lro_cnt_512"); 1789 1790 SYSCTL_ADD_QUAD(ctx, node_children, 1791 OID_AUTO, "lro_cnt_1024", 1792 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1793 "lro_cnt_1024"); 1794 1795 /* Rx Related */ 1796 1797 SYSCTL_ADD_QUAD(ctx, node_children, 1798 OID_AUTO, "rx_pkts", 1799 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1800 "No. of received packets"); 1801 1802 SYSCTL_ADD_QUAD(ctx, node_children, 1803 OID_AUTO, "tpa_start", 1804 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1805 "No. of tpa_start packets"); 1806 1807 SYSCTL_ADD_QUAD(ctx, node_children, 1808 OID_AUTO, "tpa_cont", 1809 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1810 "No. of tpa_cont packets"); 1811 1812 SYSCTL_ADD_QUAD(ctx, node_children, 1813 OID_AUTO, "tpa_end", 1814 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1815 "No. of tpa_end packets"); 1816 1817 SYSCTL_ADD_QUAD(ctx, node_children, 1818 OID_AUTO, "err_m_getcl", 1819 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1820 "err_m_getcl"); 1821 1822 SYSCTL_ADD_QUAD(ctx, node_children, 1823 OID_AUTO, "err_m_getjcl", 1824 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1825 "err_m_getjcl"); 1826 1827 SYSCTL_ADD_QUAD(ctx, node_children, 1828 OID_AUTO, "err_rx_hw_errors", 1829 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1830 "err_rx_hw_errors"); 1831 1832 SYSCTL_ADD_QUAD(ctx, node_children, 1833 OID_AUTO, "err_rx_alloc_errors", 1834 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1835 "err_rx_alloc_errors"); 1836 } 1837 1838 return; 1839 } 1840 1841 static void 1842 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1843 { 1844 struct sysctl_ctx_list *ctx; 1845 struct sysctl_oid_list *children; 1846 struct sysctl_oid *ctx_oid; 1847 1848 ctx = device_get_sysctl_ctx(ha->pci_dev); 1849 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1850 1851 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1852 CTLFLAG_RD, NULL, "hwstat"); 1853 children = SYSCTL_CHILDREN(ctx_oid); 1854 1855 SYSCTL_ADD_QUAD(ctx, children, 1856 OID_AUTO, "no_buff_discards", 1857 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1858 "No. of packets discarded due to lack of buffer"); 1859 1860 SYSCTL_ADD_QUAD(ctx, children, 1861 OID_AUTO, "packet_too_big_discard", 1862 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1863 "No. 
of packets discarded because packet was too big"); 1864 1865 SYSCTL_ADD_QUAD(ctx, children, 1866 OID_AUTO, "ttl0_discard", 1867 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1868 "ttl0_discard"); 1869 1870 SYSCTL_ADD_QUAD(ctx, children, 1871 OID_AUTO, "rx_ucast_bytes", 1872 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1873 "rx_ucast_bytes"); 1874 1875 SYSCTL_ADD_QUAD(ctx, children, 1876 OID_AUTO, "rx_mcast_bytes", 1877 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1878 "rx_mcast_bytes"); 1879 1880 SYSCTL_ADD_QUAD(ctx, children, 1881 OID_AUTO, "rx_bcast_bytes", 1882 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1883 "rx_bcast_bytes"); 1884 1885 SYSCTL_ADD_QUAD(ctx, children, 1886 OID_AUTO, "rx_ucast_pkts", 1887 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1888 "rx_ucast_pkts"); 1889 1890 SYSCTL_ADD_QUAD(ctx, children, 1891 OID_AUTO, "rx_mcast_pkts", 1892 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1893 "rx_mcast_pkts"); 1894 1895 SYSCTL_ADD_QUAD(ctx, children, 1896 OID_AUTO, "rx_bcast_pkts", 1897 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1898 "rx_bcast_pkts"); 1899 1900 SYSCTL_ADD_QUAD(ctx, children, 1901 OID_AUTO, "mftag_filter_discards", 1902 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1903 "mftag_filter_discards"); 1904 1905 SYSCTL_ADD_QUAD(ctx, children, 1906 OID_AUTO, "mac_filter_discards", 1907 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1908 "mac_filter_discards"); 1909 1910 SYSCTL_ADD_QUAD(ctx, children, 1911 OID_AUTO, "tx_ucast_bytes", 1912 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1913 "tx_ucast_bytes"); 1914 1915 SYSCTL_ADD_QUAD(ctx, children, 1916 OID_AUTO, "tx_mcast_bytes", 1917 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1918 "tx_mcast_bytes"); 1919 1920 SYSCTL_ADD_QUAD(ctx, children, 1921 OID_AUTO, "tx_bcast_bytes", 1922 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1923 "tx_bcast_bytes"); 1924 1925 SYSCTL_ADD_QUAD(ctx, children, 1926 OID_AUTO, "tx_ucast_pkts", 1927 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1928 "tx_ucast_pkts"); 1929 1930 SYSCTL_ADD_QUAD(ctx, children, 1931 OID_AUTO, "tx_mcast_pkts", 1932 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1933 "tx_mcast_pkts"); 1934 1935 SYSCTL_ADD_QUAD(ctx, children, 1936 OID_AUTO, "tx_bcast_pkts", 1937 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1938 "tx_bcast_pkts"); 1939 1940 SYSCTL_ADD_QUAD(ctx, children, 1941 OID_AUTO, "tx_err_drop_pkts", 1942 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1943 "tx_err_drop_pkts"); 1944 1945 SYSCTL_ADD_QUAD(ctx, children, 1946 OID_AUTO, "tpa_coalesced_pkts", 1947 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1948 "tpa_coalesced_pkts"); 1949 1950 SYSCTL_ADD_QUAD(ctx, children, 1951 OID_AUTO, "tpa_coalesced_events", 1952 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1953 "tpa_coalesced_events"); 1954 1955 SYSCTL_ADD_QUAD(ctx, children, 1956 OID_AUTO, "tpa_aborts_num", 1957 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1958 "tpa_aborts_num"); 1959 1960 SYSCTL_ADD_QUAD(ctx, children, 1961 OID_AUTO, "tpa_not_coalesced_pkts", 1962 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1963 "tpa_not_coalesced_pkts"); 1964 1965 SYSCTL_ADD_QUAD(ctx, children, 1966 OID_AUTO, "tpa_coalesced_bytes", 1967 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1968 "tpa_coalesced_bytes"); 1969 1970 SYSCTL_ADD_QUAD(ctx, children, 1971 OID_AUTO, "rx_64_byte_packets", 1972 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1973 "rx_64_byte_packets"); 1974 1975 SYSCTL_ADD_QUAD(ctx, children, 1976 OID_AUTO, 
"rx_65_to_127_byte_packets", 1977 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1978 "rx_65_to_127_byte_packets"); 1979 1980 SYSCTL_ADD_QUAD(ctx, children, 1981 OID_AUTO, "rx_128_to_255_byte_packets", 1982 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1983 "rx_128_to_255_byte_packets"); 1984 1985 SYSCTL_ADD_QUAD(ctx, children, 1986 OID_AUTO, "rx_256_to_511_byte_packets", 1987 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1988 "rx_256_to_511_byte_packets"); 1989 1990 SYSCTL_ADD_QUAD(ctx, children, 1991 OID_AUTO, "rx_512_to_1023_byte_packets", 1992 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1993 "rx_512_to_1023_byte_packets"); 1994 1995 SYSCTL_ADD_QUAD(ctx, children, 1996 OID_AUTO, "rx_1024_to_1518_byte_packets", 1997 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1998 "rx_1024_to_1518_byte_packets"); 1999 2000 SYSCTL_ADD_QUAD(ctx, children, 2001 OID_AUTO, "rx_1519_to_1522_byte_packets", 2002 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 2003 "rx_1519_to_1522_byte_packets"); 2004 2005 SYSCTL_ADD_QUAD(ctx, children, 2006 OID_AUTO, "rx_1523_to_2047_byte_packets", 2007 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 2008 "rx_1523_to_2047_byte_packets"); 2009 2010 SYSCTL_ADD_QUAD(ctx, children, 2011 OID_AUTO, "rx_2048_to_4095_byte_packets", 2012 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 2013 "rx_2048_to_4095_byte_packets"); 2014 2015 SYSCTL_ADD_QUAD(ctx, children, 2016 OID_AUTO, "rx_4096_to_9216_byte_packets", 2017 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 2018 "rx_4096_to_9216_byte_packets"); 2019 2020 SYSCTL_ADD_QUAD(ctx, children, 2021 OID_AUTO, "rx_9217_to_16383_byte_packets", 2022 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 2023 "rx_9217_to_16383_byte_packets"); 2024 2025 SYSCTL_ADD_QUAD(ctx, children, 2026 OID_AUTO, "rx_crc_errors", 2027 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 2028 "rx_crc_errors"); 2029 2030 SYSCTL_ADD_QUAD(ctx, children, 2031 OID_AUTO, "rx_mac_crtl_frames", 2032 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 2033 "rx_mac_crtl_frames"); 2034 2035 SYSCTL_ADD_QUAD(ctx, children, 2036 OID_AUTO, "rx_pause_frames", 2037 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 2038 "rx_pause_frames"); 2039 2040 SYSCTL_ADD_QUAD(ctx, children, 2041 OID_AUTO, "rx_pfc_frames", 2042 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 2043 "rx_pfc_frames"); 2044 2045 SYSCTL_ADD_QUAD(ctx, children, 2046 OID_AUTO, "rx_align_errors", 2047 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 2048 "rx_align_errors"); 2049 2050 SYSCTL_ADD_QUAD(ctx, children, 2051 OID_AUTO, "rx_carrier_errors", 2052 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 2053 "rx_carrier_errors"); 2054 2055 SYSCTL_ADD_QUAD(ctx, children, 2056 OID_AUTO, "rx_oversize_packets", 2057 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2058 "rx_oversize_packets"); 2059 2060 SYSCTL_ADD_QUAD(ctx, children, 2061 OID_AUTO, "rx_jabbers", 2062 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2063 "rx_jabbers"); 2064 2065 SYSCTL_ADD_QUAD(ctx, children, 2066 OID_AUTO, "rx_undersize_packets", 2067 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2068 "rx_undersize_packets"); 2069 2070 SYSCTL_ADD_QUAD(ctx, children, 2071 OID_AUTO, "rx_fragments", 2072 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2073 "rx_fragments"); 2074 2075 SYSCTL_ADD_QUAD(ctx, children, 2076 OID_AUTO, "tx_64_byte_packets", 2077 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2078 
"tx_64_byte_packets"); 2079 2080 SYSCTL_ADD_QUAD(ctx, children, 2081 OID_AUTO, "tx_65_to_127_byte_packets", 2082 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2083 "tx_65_to_127_byte_packets"); 2084 2085 SYSCTL_ADD_QUAD(ctx, children, 2086 OID_AUTO, "tx_128_to_255_byte_packets", 2087 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2088 "tx_128_to_255_byte_packets"); 2089 2090 SYSCTL_ADD_QUAD(ctx, children, 2091 OID_AUTO, "tx_256_to_511_byte_packets", 2092 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2093 "tx_256_to_511_byte_packets"); 2094 2095 SYSCTL_ADD_QUAD(ctx, children, 2096 OID_AUTO, "tx_512_to_1023_byte_packets", 2097 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2098 "tx_512_to_1023_byte_packets"); 2099 2100 SYSCTL_ADD_QUAD(ctx, children, 2101 OID_AUTO, "tx_1024_to_1518_byte_packets", 2102 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2103 "tx_1024_to_1518_byte_packets"); 2104 2105 SYSCTL_ADD_QUAD(ctx, children, 2106 OID_AUTO, "tx_1519_to_2047_byte_packets", 2107 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2108 "tx_1519_to_2047_byte_packets"); 2109 2110 SYSCTL_ADD_QUAD(ctx, children, 2111 OID_AUTO, "tx_2048_to_4095_byte_packets", 2112 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2113 "tx_2048_to_4095_byte_packets"); 2114 2115 SYSCTL_ADD_QUAD(ctx, children, 2116 OID_AUTO, "tx_4096_to_9216_byte_packets", 2117 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2118 "tx_4096_to_9216_byte_packets"); 2119 2120 SYSCTL_ADD_QUAD(ctx, children, 2121 OID_AUTO, "tx_9217_to_16383_byte_packets", 2122 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2123 "tx_9217_to_16383_byte_packets"); 2124 2125 SYSCTL_ADD_QUAD(ctx, children, 2126 OID_AUTO, "tx_pause_frames", 2127 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2128 "tx_pause_frames"); 2129 2130 SYSCTL_ADD_QUAD(ctx, children, 2131 OID_AUTO, "tx_pfc_frames", 2132 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2133 "tx_pfc_frames"); 2134 2135 SYSCTL_ADD_QUAD(ctx, children, 2136 OID_AUTO, "tx_lpi_entry_count", 2137 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2138 "tx_lpi_entry_count"); 2139 2140 SYSCTL_ADD_QUAD(ctx, children, 2141 OID_AUTO, "tx_total_collisions", 2142 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2143 "tx_total_collisions"); 2144 2145 SYSCTL_ADD_QUAD(ctx, children, 2146 OID_AUTO, "brb_truncates", 2147 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2148 "brb_truncates"); 2149 2150 SYSCTL_ADD_QUAD(ctx, children, 2151 OID_AUTO, "brb_discards", 2152 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2153 "brb_discards"); 2154 2155 SYSCTL_ADD_QUAD(ctx, children, 2156 OID_AUTO, "rx_mac_bytes", 2157 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2158 "rx_mac_bytes"); 2159 2160 SYSCTL_ADD_QUAD(ctx, children, 2161 OID_AUTO, "rx_mac_uc_packets", 2162 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2163 "rx_mac_uc_packets"); 2164 2165 SYSCTL_ADD_QUAD(ctx, children, 2166 OID_AUTO, "rx_mac_mc_packets", 2167 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2168 "rx_mac_mc_packets"); 2169 2170 SYSCTL_ADD_QUAD(ctx, children, 2171 OID_AUTO, "rx_mac_bc_packets", 2172 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2173 "rx_mac_bc_packets"); 2174 2175 SYSCTL_ADD_QUAD(ctx, children, 2176 OID_AUTO, "rx_mac_frames_ok", 2177 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2178 "rx_mac_frames_ok"); 2179 2180 SYSCTL_ADD_QUAD(ctx, children, 2181 OID_AUTO, "tx_mac_bytes", 2182 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2183 "tx_mac_bytes"); 2184 2185 SYSCTL_ADD_QUAD(ctx, children, 2186 OID_AUTO, "tx_mac_uc_packets", 2187 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2188 "tx_mac_uc_packets"); 2189 2190 SYSCTL_ADD_QUAD(ctx, children, 2191 OID_AUTO, "tx_mac_mc_packets", 2192 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2193 "tx_mac_mc_packets"); 2194 2195 SYSCTL_ADD_QUAD(ctx, children, 2196 OID_AUTO, "tx_mac_bc_packets", 2197 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2198 "tx_mac_bc_packets"); 2199 2200 SYSCTL_ADD_QUAD(ctx, children, 2201 OID_AUTO, "tx_mac_ctrl_frames", 2202 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2203 "tx_mac_ctrl_frames"); 2204 return; 2205 } 2206 2207 static void 2208 qlnx_add_sysctls(qlnx_host_t *ha) 2209 { 2210 device_t dev = ha->pci_dev; 2211 struct sysctl_ctx_list *ctx; 2212 struct sysctl_oid_list *children; 2213 2214 ctx = device_get_sysctl_ctx(dev); 2215 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2216 2217 qlnx_add_fp_stats_sysctls(ha); 2218 qlnx_add_sp_stats_sysctls(ha); 2219 2220 if (qlnx_vf_device(ha) != 0) 2221 qlnx_add_hw_stats_sysctls(ha); 2222 2223 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2224 CTLFLAG_RD, qlnx_ver_str, 0, 2225 "Driver Version"); 2226 2227 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2228 CTLFLAG_RD, ha->stormfw_ver, 0, 2229 "STORM Firmware Version"); 2230 2231 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2232 CTLFLAG_RD, ha->mfw_ver, 0, 2233 "Management Firmware Version"); 2234 2235 SYSCTL_ADD_UINT(ctx, children, 2236 OID_AUTO, "personality", CTLFLAG_RD, 2237 &ha->personality, ha->personality, 2238 "\tpersonality = 0 => Ethernet Only\n" 2239 "\tpersonality = 3 => Ethernet and RoCE\n" 2240 "\tpersonality = 4 => Ethernet and iWARP\n" 2241 "\tpersonality = 6 => Default in Shared Memory\n"); 2242 2243 ha->dbg_level = 0; 2244 SYSCTL_ADD_UINT(ctx, children, 2245 OID_AUTO, "debug", CTLFLAG_RW, 2246 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2247 2248 ha->dp_level = 0x01; 2249 SYSCTL_ADD_UINT(ctx, children, 2250 OID_AUTO, "dp_level", CTLFLAG_RW, 2251 &ha->dp_level, ha->dp_level, "DP Level"); 2252 2253 ha->dbg_trace_lro_cnt = 0; 2254 SYSCTL_ADD_UINT(ctx, children, 2255 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2256 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2257 "Trace LRO Counts"); 2258 2259 ha->dbg_trace_tso_pkt_len = 0; 2260 SYSCTL_ADD_UINT(ctx, children, 2261 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2262 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2263 "Trace TSO packet lengths"); 2264 2265 ha->dp_module = 0; 2266 SYSCTL_ADD_UINT(ctx, children, 2267 OID_AUTO, "dp_module", CTLFLAG_RW, 2268 &ha->dp_module, ha->dp_module, "DP Module"); 2269 2270 ha->err_inject = 0; 2271 2272 SYSCTL_ADD_UINT(ctx, children, 2273 OID_AUTO, "err_inject", CTLFLAG_RW, 2274 &ha->err_inject, ha->err_inject, "Error Inject"); 2275 2276 ha->storm_stats_enable = 0; 2277 2278 SYSCTL_ADD_UINT(ctx, children, 2279 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2280 &ha->storm_stats_enable, ha->storm_stats_enable, 2281 "Enable Storm Statistics Gathering"); 2282 2283 ha->storm_stats_index = 0; 2284 2285 SYSCTL_ADD_UINT(ctx, children, 2286 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2287 &ha->storm_stats_index, ha->storm_stats_index, 2288 "Enable Storm Statistics Gathering Current Index"); 2289 2290 ha->grcdump_taken = 0; 2291 SYSCTL_ADD_UINT(ctx, children, 2292 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2293 &ha->grcdump_taken, ha->grcdump_taken, 
2294 "grcdump_taken"); 2295 2296 ha->idle_chk_taken = 0; 2297 SYSCTL_ADD_UINT(ctx, children, 2298 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2299 &ha->idle_chk_taken, ha->idle_chk_taken, 2300 "idle_chk_taken"); 2301 2302 SYSCTL_ADD_UINT(ctx, children, 2303 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2304 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2305 "rx_coalesce_usecs"); 2306 2307 SYSCTL_ADD_UINT(ctx, children, 2308 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2309 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2310 "tx_coalesce_usecs"); 2311 2312 SYSCTL_ADD_PROC(ctx, children, 2313 OID_AUTO, "trigger_dump", (CTLTYPE_INT | CTLFLAG_RW), 2314 (void *)ha, 0, 2315 qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2316 2317 SYSCTL_ADD_PROC(ctx, children, 2318 OID_AUTO, "set_rx_coalesce_usecs", 2319 (CTLTYPE_INT | CTLFLAG_RW), 2320 (void *)ha, 0, 2321 qlnx_set_rx_coalesce, "I", 2322 "rx interrupt coalesce period microseconds"); 2323 2324 SYSCTL_ADD_PROC(ctx, children, 2325 OID_AUTO, "set_tx_coalesce_usecs", 2326 (CTLTYPE_INT | CTLFLAG_RW), 2327 (void *)ha, 0, 2328 qlnx_set_tx_coalesce, "I", 2329 "tx interrupt coalesce period microseconds"); 2330 2331 ha->rx_pkt_threshold = 128; 2332 SYSCTL_ADD_UINT(ctx, children, 2333 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2334 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2335 "No. of Rx Pkts to process at a time"); 2336 2337 ha->rx_jumbo_buf_eq_mtu = 0; 2338 SYSCTL_ADD_UINT(ctx, children, 2339 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2340 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2341 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2342 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2343 2344 SYSCTL_ADD_QUAD(ctx, children, 2345 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2346 &ha->err_illegal_intr, "err_illegal_intr"); 2347 2348 SYSCTL_ADD_QUAD(ctx, children, 2349 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2350 &ha->err_fp_null, "err_fp_null"); 2351 2352 SYSCTL_ADD_QUAD(ctx, children, 2353 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2354 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2355 return; 2356 } 2357 2358 2359 2360 /***************************************************************************** 2361 * Operating System Network Interface Functions 2362 *****************************************************************************/ 2363 2364 static void 2365 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2366 { 2367 uint16_t device_id; 2368 struct ifnet *ifp; 2369 2370 ifp = ha->ifp = if_alloc(IFT_ETHER); 2371 2372 if (ifp == NULL) 2373 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2374 2375 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2376 2377 device_id = pci_get_device(ha->pci_dev); 2378 2379 #if __FreeBSD_version >= 1000000 2380 2381 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2382 ifp->if_baudrate = IF_Gbps(40); 2383 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2384 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2385 ifp->if_baudrate = IF_Gbps(25); 2386 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2387 ifp->if_baudrate = IF_Gbps(50); 2388 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2389 ifp->if_baudrate = IF_Gbps(100); 2390 2391 ifp->if_capabilities = IFCAP_LINKSTATE; 2392 #else 2393 ifp->if_mtu = ETHERMTU; 2394 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 2395 2396 #endif /* #if __FreeBSD_version >= 1000000 */ 2397 2398 ifp->if_init = qlnx_init; 2399 ifp->if_softc = ha; 2400 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2401 ifp->if_ioctl = qlnx_ioctl; 
2402 ifp->if_transmit = qlnx_transmit; 2403 ifp->if_qflush = qlnx_qflush; 2404 2405 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 2406 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 2407 IFQ_SET_READY(&ifp->if_snd); 2408 2409 #if __FreeBSD_version >= 1100036 2410 if_setgetcounterfn(ifp, qlnx_get_counter); 2411 #endif 2412 2413 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2414 2415 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2416 2417 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2418 !ha->primary_mac[2] && !ha->primary_mac[3] && 2419 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2420 uint32_t rnd; 2421 2422 rnd = arc4random(); 2423 2424 ha->primary_mac[0] = 0x00; 2425 ha->primary_mac[1] = 0x0e; 2426 ha->primary_mac[2] = 0x1e; 2427 ha->primary_mac[3] = rnd & 0xFF; 2428 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2429 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2430 } 2431 2432 ether_ifattach(ifp, ha->primary_mac); 2433 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2434 2435 ifp->if_capabilities = IFCAP_HWCSUM; 2436 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2437 2438 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2439 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 2440 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2441 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2442 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 2443 ifp->if_capabilities |= IFCAP_TSO4; 2444 ifp->if_capabilities |= IFCAP_TSO6; 2445 ifp->if_capabilities |= IFCAP_LRO; 2446 2447 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE - 2448 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2449 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */; 2450 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE; 2451 2452 2453 ifp->if_capenable = ifp->if_capabilities; 2454 2455 ifp->if_hwassist = CSUM_IP; 2456 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 2457 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 2458 ifp->if_hwassist |= CSUM_TSO; 2459 2460 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2461 2462 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2463 qlnx_media_status); 2464 2465 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2466 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2467 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2468 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2469 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2470 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2471 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2472 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2473 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2474 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2475 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2476 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2477 ifmedia_add(&ha->media, 2478 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2479 ifmedia_add(&ha->media, 2480 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2481 ifmedia_add(&ha->media, 2482 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2483 } 2484 2485 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2486 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2487 2488 2489 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2490 2491 QL_DPRINT2(ha, "exit\n"); 2492 2493 return; 2494 } 2495 2496 static void 2497 qlnx_init_locked(qlnx_host_t *ha) 2498 { 2499 struct ifnet *ifp = ha->ifp; 2500 2501 QL_DPRINT1(ha, "Driver Initialization start \n"); 2502 2503 qlnx_stop(ha); 2504 
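	/*
	 * Bring the port down first so a re-init starts from a clean state;
	 * the RUNNING flag is set only if qlnx_load() succeeds.
	 */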
2505 if (qlnx_load(ha) == 0) { 2506 2507 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2508 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2509 2510 #ifdef QLNX_ENABLE_IWARP 2511 if (qlnx_vf_device(ha) != 0) { 2512 qlnx_rdma_dev_open(ha); 2513 } 2514 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2515 } 2516 2517 return; 2518 } 2519 2520 static void 2521 qlnx_init(void *arg) 2522 { 2523 qlnx_host_t *ha; 2524 2525 ha = (qlnx_host_t *)arg; 2526 2527 QL_DPRINT2(ha, "enter\n"); 2528 2529 QLNX_LOCK(ha); 2530 qlnx_init_locked(ha); 2531 QLNX_UNLOCK(ha); 2532 2533 QL_DPRINT2(ha, "exit\n"); 2534 2535 return; 2536 } 2537 2538 static int 2539 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2540 { 2541 struct ecore_filter_mcast *mcast; 2542 struct ecore_dev *cdev; 2543 int rc; 2544 2545 cdev = &ha->cdev; 2546 2547 mcast = &ha->ecore_mcast; 2548 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2549 2550 if (add_mac) 2551 mcast->opcode = ECORE_FILTER_ADD; 2552 else 2553 mcast->opcode = ECORE_FILTER_REMOVE; 2554 2555 mcast->num_mc_addrs = 1; 2556 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2557 2558 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2559 2560 return (rc); 2561 } 2562 2563 static int 2564 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2565 { 2566 int i; 2567 2568 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2569 2570 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2571 return 0; /* its been already added */ 2572 } 2573 2574 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2575 2576 if ((ha->mcast[i].addr[0] == 0) && 2577 (ha->mcast[i].addr[1] == 0) && 2578 (ha->mcast[i].addr[2] == 0) && 2579 (ha->mcast[i].addr[3] == 0) && 2580 (ha->mcast[i].addr[4] == 0) && 2581 (ha->mcast[i].addr[5] == 0)) { 2582 2583 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2584 return (-1); 2585 2586 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2587 ha->nmcast++; 2588 2589 return 0; 2590 } 2591 } 2592 return 0; 2593 } 2594 2595 static int 2596 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2597 { 2598 int i; 2599 2600 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2601 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2602 2603 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2604 return (-1); 2605 2606 ha->mcast[i].addr[0] = 0; 2607 ha->mcast[i].addr[1] = 0; 2608 ha->mcast[i].addr[2] = 0; 2609 ha->mcast[i].addr[3] = 0; 2610 ha->mcast[i].addr[4] = 0; 2611 ha->mcast[i].addr[5] = 0; 2612 2613 ha->nmcast--; 2614 2615 return 0; 2616 } 2617 } 2618 return 0; 2619 } 2620 2621 /* 2622 * Name: qls_hw_set_multi 2623 * Function: Sets the Multicast Addresses provided the host O.S into the 2624 * hardware (for the given interface) 2625 */ 2626 static void 2627 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2628 uint32_t add_mac) 2629 { 2630 int i; 2631 2632 for (i = 0; i < mcnt; i++) { 2633 if (add_mac) { 2634 if (qlnx_hw_add_mcast(ha, mta)) 2635 break; 2636 } else { 2637 if (qlnx_hw_del_mcast(ha, mta)) 2638 break; 2639 } 2640 2641 mta += ETHER_HDR_LEN; 2642 } 2643 return; 2644 } 2645 2646 2647 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN) 2648 static int 2649 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2650 { 2651 uint8_t mta[QLNX_MCAST_ADDRS_SIZE]; 2652 struct ifmultiaddr *ifma; 2653 int mcnt = 0; 2654 struct ifnet *ifp = ha->ifp; 2655 int ret = 0; 2656 2657 if (qlnx_vf_device(ha) == 0) 2658 return (0); 2659 2660 if_maddr_rlock(ifp); 2661 2662 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2663 2664 if (ifma->ifma_addr->sa_family != 
AF_LINK) 2665 continue; 2666 2667 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2668 break; 2669 2670 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2671 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2672 2673 mcnt++; 2674 } 2675 2676 if_maddr_runlock(ifp); 2677 2678 QLNX_LOCK(ha); 2679 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2680 QLNX_UNLOCK(ha); 2681 2682 return (ret); 2683 } 2684 2685 static int 2686 qlnx_set_promisc(qlnx_host_t *ha) 2687 { 2688 int rc = 0; 2689 uint8_t filter; 2690 2691 if (qlnx_vf_device(ha) == 0) 2692 return (0); 2693 2694 filter = ha->filter; 2695 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2696 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2697 2698 rc = qlnx_set_rx_accept_filter(ha, filter); 2699 return (rc); 2700 } 2701 2702 static int 2703 qlnx_set_allmulti(qlnx_host_t *ha) 2704 { 2705 int rc = 0; 2706 uint8_t filter; 2707 2708 if (qlnx_vf_device(ha) == 0) 2709 return (0); 2710 2711 filter = ha->filter; 2712 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2713 rc = qlnx_set_rx_accept_filter(ha, filter); 2714 2715 return (rc); 2716 } 2717 2718 2719 static int 2720 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2721 { 2722 int ret = 0, mask; 2723 struct ifreq *ifr = (struct ifreq *)data; 2724 struct ifaddr *ifa = (struct ifaddr *)data; 2725 qlnx_host_t *ha; 2726 2727 ha = (qlnx_host_t *)ifp->if_softc; 2728 2729 switch (cmd) { 2730 case SIOCSIFADDR: 2731 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2732 2733 if (ifa->ifa_addr->sa_family == AF_INET) { 2734 ifp->if_flags |= IFF_UP; 2735 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2736 QLNX_LOCK(ha); 2737 qlnx_init_locked(ha); 2738 QLNX_UNLOCK(ha); 2739 } 2740 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2741 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2742 2743 arp_ifinit(ifp, ifa); 2744 } else { 2745 ether_ioctl(ifp, cmd, data); 2746 } 2747 break; 2748 2749 case SIOCSIFMTU: 2750 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2751 2752 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2753 ret = EINVAL; 2754 } else { 2755 QLNX_LOCK(ha); 2756 ifp->if_mtu = ifr->ifr_mtu; 2757 ha->max_frame_size = 2758 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2759 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2760 qlnx_init_locked(ha); 2761 } 2762 2763 QLNX_UNLOCK(ha); 2764 } 2765 2766 break; 2767 2768 case SIOCSIFFLAGS: 2769 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2770 2771 QLNX_LOCK(ha); 2772 2773 if (ifp->if_flags & IFF_UP) { 2774 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2775 if ((ifp->if_flags ^ ha->if_flags) & 2776 IFF_PROMISC) { 2777 ret = qlnx_set_promisc(ha); 2778 } else if ((ifp->if_flags ^ ha->if_flags) & 2779 IFF_ALLMULTI) { 2780 ret = qlnx_set_allmulti(ha); 2781 } 2782 } else { 2783 ha->max_frame_size = ifp->if_mtu + 2784 ETHER_HDR_LEN + ETHER_CRC_LEN; 2785 qlnx_init_locked(ha); 2786 } 2787 } else { 2788 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2789 qlnx_stop(ha); 2790 ha->if_flags = ifp->if_flags; 2791 } 2792 2793 QLNX_UNLOCK(ha); 2794 break; 2795 2796 case SIOCADDMULTI: 2797 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2798 2799 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2800 if (qlnx_set_multi(ha, 1)) 2801 ret = EINVAL; 2802 } 2803 break; 2804 2805 case SIOCDELMULTI: 2806 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2807 2808 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2809 if (qlnx_set_multi(ha, 0)) 2810 ret = EINVAL; 2811 } 2812 break; 2813 2814 case SIOCSIFMEDIA: 2815 case SIOCGIFMEDIA: 2816 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2817 2818 ret = ifmedia_ioctl(ifp, ifr, &ha->media, 
cmd); 2819 break; 2820 2821 case SIOCSIFCAP: 2822 2823 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2824 2825 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2826 2827 if (mask & IFCAP_HWCSUM) 2828 ifp->if_capenable ^= IFCAP_HWCSUM; 2829 if (mask & IFCAP_TSO4) 2830 ifp->if_capenable ^= IFCAP_TSO4; 2831 if (mask & IFCAP_TSO6) 2832 ifp->if_capenable ^= IFCAP_TSO6; 2833 if (mask & IFCAP_VLAN_HWTAGGING) 2834 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2835 if (mask & IFCAP_VLAN_HWTSO) 2836 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2837 if (mask & IFCAP_LRO) 2838 ifp->if_capenable ^= IFCAP_LRO; 2839 2840 QLNX_LOCK(ha); 2841 2842 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2843 qlnx_init_locked(ha); 2844 2845 QLNX_UNLOCK(ha); 2846 2847 VLAN_CAPABILITIES(ifp); 2848 break; 2849 2850 #if (__FreeBSD_version >= 1100101) 2851 2852 case SIOCGI2C: 2853 { 2854 struct ifi2creq i2c; 2855 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2856 struct ecore_ptt *p_ptt; 2857 2858 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2859 2860 if (ret) 2861 break; 2862 2863 if ((i2c.len > sizeof (i2c.data)) || 2864 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2865 ret = EINVAL; 2866 break; 2867 } 2868 2869 p_ptt = ecore_ptt_acquire(p_hwfn); 2870 2871 if (!p_ptt) { 2872 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2873 ret = -1; 2874 break; 2875 } 2876 2877 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2878 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2879 i2c.len, &i2c.data[0]); 2880 2881 ecore_ptt_release(p_hwfn, p_ptt); 2882 2883 if (ret) { 2884 ret = -1; 2885 break; 2886 } 2887 2888 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2889 2890 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2891 len = %d addr = 0x%02x offset = 0x%04x \ 2892 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2893 0x%02x 0x%02x 0x%02x\n", 2894 ret, i2c.len, i2c.dev_addr, i2c.offset, 2895 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2896 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2897 break; 2898 } 2899 #endif /* #if (__FreeBSD_version >= 1100101) */ 2900 2901 default: 2902 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2903 ret = ether_ioctl(ifp, cmd, data); 2904 break; 2905 } 2906 2907 return (ret); 2908 } 2909 2910 static int 2911 qlnx_media_change(struct ifnet *ifp) 2912 { 2913 qlnx_host_t *ha; 2914 struct ifmedia *ifm; 2915 int ret = 0; 2916 2917 ha = (qlnx_host_t *)ifp->if_softc; 2918 2919 QL_DPRINT2(ha, "enter\n"); 2920 2921 ifm = &ha->media; 2922 2923 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2924 ret = EINVAL; 2925 2926 QL_DPRINT2(ha, "exit\n"); 2927 2928 return (ret); 2929 } 2930 2931 static void 2932 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2933 { 2934 qlnx_host_t *ha; 2935 2936 ha = (qlnx_host_t *)ifp->if_softc; 2937 2938 QL_DPRINT2(ha, "enter\n"); 2939 2940 ifmr->ifm_status = IFM_AVALID; 2941 ifmr->ifm_active = IFM_ETHER; 2942 2943 if (ha->link_up) { 2944 ifmr->ifm_status |= IFM_ACTIVE; 2945 ifmr->ifm_active |= 2946 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2947 2948 if (ha->if_link.link_partner_caps & 2949 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2950 ifmr->ifm_active |= 2951 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2952 } 2953 2954 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2955 2956 return; 2957 } 2958 2959 2960 static void 2961 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2962 struct qlnx_tx_queue *txq) 2963 { 2964 u16 idx; 2965 struct mbuf *mp; 2966 bus_dmamap_t map; 2967 int i; 2968 struct eth_tx_bd *tx_data_bd; 2969 struct eth_tx_1st_bd *first_bd; 2970 int nbds = 0; 2971 2972 idx = txq->sw_tx_cons; 2973 mp = txq->sw_tx_ring[idx].mp; 2974 map = txq->sw_tx_ring[idx].map; 2975 2976 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2977 2978 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2979 2980 QL_DPRINT1(ha, "(mp == NULL) " 2981 " tx_idx = 0x%x" 2982 " ecore_prod_idx = 0x%x" 2983 " ecore_cons_idx = 0x%x" 2984 " hw_bd_cons = 0x%x" 2985 " txq_db_last = 0x%x" 2986 " elem_left = 0x%x\n", 2987 fp->rss_id, 2988 ecore_chain_get_prod_idx(&txq->tx_pbl), 2989 ecore_chain_get_cons_idx(&txq->tx_pbl), 2990 le16toh(*txq->hw_cons_ptr), 2991 txq->tx_db.raw, 2992 ecore_chain_get_elem_left(&txq->tx_pbl)); 2993 2994 fp->err_tx_free_pkt_null++; 2995 2996 //DEBUG 2997 qlnx_trigger_dump(ha); 2998 2999 return; 3000 } else { 3001 3002 QLNX_INC_OPACKETS((ha->ifp)); 3003 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 3004 3005 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 3006 bus_dmamap_unload(ha->tx_tag, map); 3007 3008 fp->tx_pkts_freed++; 3009 fp->tx_pkts_completed++; 3010 3011 m_freem(mp); 3012 } 3013 3014 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 3015 nbds = first_bd->data.nbds; 3016 3017 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 3018 3019 for (i = 1; i < nbds; i++) { 3020 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 3021 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 3022 } 3023 txq->sw_tx_ring[idx].flags = 0; 3024 txq->sw_tx_ring[idx].mp = NULL; 3025 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 3026 3027 return; 3028 } 3029 3030 static void 3031 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3032 struct qlnx_tx_queue *txq) 3033 { 3034 u16 hw_bd_cons; 3035 u16 ecore_cons_idx; 3036 uint16_t diff; 3037 uint16_t idx, idx2; 3038 3039 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 3040 3041 while (hw_bd_cons != 3042 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 3043 3044 if (hw_bd_cons < ecore_cons_idx) { 3045 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 3046 } else { 3047 diff = hw_bd_cons - ecore_cons_idx; 3048 } 3049 if ((diff > TX_RING_SIZE) || 3050 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 3051 3052 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 3053 3054 QL_DPRINT1(ha, "(diff = 0x%x) " 3055 " tx_idx = 0x%x" 3056 " ecore_prod_idx = 0x%x" 3057 " ecore_cons_idx = 0x%x" 3058 " hw_bd_cons = 0x%x" 3059 " txq_db_last = 0x%x" 3060 " elem_left = 0x%x\n", 3061 diff, 3062 fp->rss_id, 3063 ecore_chain_get_prod_idx(&txq->tx_pbl), 3064 ecore_chain_get_cons_idx(&txq->tx_pbl), 3065 le16toh(*txq->hw_cons_ptr), 3066 txq->tx_db.raw, 3067 ecore_chain_get_elem_left(&txq->tx_pbl)); 3068 3069 fp->err_tx_cons_idx_conflict++; 3070 3071 //DEBUG 3072 qlnx_trigger_dump(ha); 3073 } 3074 3075 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3076 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 3077 prefetch(txq->sw_tx_ring[idx].mp); 3078 prefetch(txq->sw_tx_ring[idx2].mp); 3079 3080 qlnx_free_tx_pkt(ha, fp, txq); 3081 3082 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3083 } 3084 return; 3085 } 3086 3087 static int 3088 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp) 3089 { 3090 int ret = 0; 3091 struct 
qlnx_tx_queue *txq; 3092 qlnx_host_t * ha; 3093 uint16_t elem_left; 3094 3095 txq = fp->txq[0]; 3096 ha = (qlnx_host_t *)fp->edev; 3097 3098 3099 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3100 if(mp != NULL) 3101 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3102 return (ret); 3103 } 3104 3105 if(mp != NULL) 3106 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3107 3108 mp = drbr_peek(ifp, fp->tx_br); 3109 3110 while (mp != NULL) { 3111 3112 if (qlnx_send(ha, fp, &mp)) { 3113 3114 if (mp != NULL) { 3115 drbr_putback(ifp, fp->tx_br, mp); 3116 } else { 3117 fp->tx_pkts_processed++; 3118 drbr_advance(ifp, fp->tx_br); 3119 } 3120 goto qlnx_transmit_locked_exit; 3121 3122 } else { 3123 drbr_advance(ifp, fp->tx_br); 3124 fp->tx_pkts_transmitted++; 3125 fp->tx_pkts_processed++; 3126 } 3127 3128 mp = drbr_peek(ifp, fp->tx_br); 3129 } 3130 3131 qlnx_transmit_locked_exit: 3132 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3133 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3134 < QLNX_TX_ELEM_MAX_THRESH)) 3135 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3136 3137 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3138 return ret; 3139 } 3140 3141 3142 static int 3143 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 3144 { 3145 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 3146 struct qlnx_fastpath *fp; 3147 int rss_id = 0, ret = 0; 3148 3149 #ifdef QLNX_TRACEPERF_DATA 3150 uint64_t tx_pkts = 0, tx_compl = 0; 3151 #endif 3152 3153 QL_DPRINT2(ha, "enter\n"); 3154 3155 #if __FreeBSD_version >= 1100000 3156 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3157 #else 3158 if (mp->m_flags & M_FLOWID) 3159 #endif 3160 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3161 ha->num_rss; 3162 3163 fp = &ha->fp_array[rss_id]; 3164 3165 if (fp->tx_br == NULL) { 3166 ret = EINVAL; 3167 goto qlnx_transmit_exit; 3168 } 3169 3170 if (mtx_trylock(&fp->tx_mtx)) { 3171 3172 #ifdef QLNX_TRACEPERF_DATA 3173 tx_pkts = fp->tx_pkts_transmitted; 3174 tx_compl = fp->tx_pkts_completed; 3175 #endif 3176 3177 ret = qlnx_transmit_locked(ifp, fp, mp); 3178 3179 #ifdef QLNX_TRACEPERF_DATA 3180 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3181 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3182 #endif 3183 mtx_unlock(&fp->tx_mtx); 3184 } else { 3185 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3186 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3187 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3188 } 3189 } 3190 3191 qlnx_transmit_exit: 3192 3193 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3194 return ret; 3195 } 3196 3197 static void 3198 qlnx_qflush(struct ifnet *ifp) 3199 { 3200 int rss_id; 3201 struct qlnx_fastpath *fp; 3202 struct mbuf *mp; 3203 qlnx_host_t *ha; 3204 3205 ha = (qlnx_host_t *)ifp->if_softc; 3206 3207 QL_DPRINT2(ha, "enter\n"); 3208 3209 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3210 3211 fp = &ha->fp_array[rss_id]; 3212 3213 if (fp == NULL) 3214 continue; 3215 3216 if (fp->tx_br) { 3217 mtx_lock(&fp->tx_mtx); 3218 3219 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3220 fp->tx_pkts_freed++; 3221 m_freem(mp); 3222 } 3223 mtx_unlock(&fp->tx_mtx); 3224 } 3225 } 3226 QL_DPRINT2(ha, "exit\n"); 3227 3228 return; 3229 } 3230 3231 static void 3232 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3233 { 3234 struct ecore_dev *cdev; 3235 uint32_t offset; 3236 3237 cdev = &ha->cdev; 3238 3239 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3240 3241 bus_write_4(ha->pci_dbells, offset, 
value); 3242 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3243 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3244 3245 return; 3246 } 3247 3248 static uint32_t 3249 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3250 { 3251 struct ether_vlan_header *eh = NULL; 3252 struct ip *ip = NULL; 3253 struct ip6_hdr *ip6 = NULL; 3254 struct tcphdr *th = NULL; 3255 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3256 uint16_t etype = 0; 3257 device_t dev; 3258 uint8_t buf[sizeof(struct ip6_hdr)]; 3259 3260 dev = ha->pci_dev; 3261 3262 eh = mtod(mp, struct ether_vlan_header *); 3263 3264 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3265 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3266 etype = ntohs(eh->evl_proto); 3267 } else { 3268 ehdrlen = ETHER_HDR_LEN; 3269 etype = ntohs(eh->evl_encap_proto); 3270 } 3271 3272 switch (etype) { 3273 3274 case ETHERTYPE_IP: 3275 ip = (struct ip *)(mp->m_data + ehdrlen); 3276 3277 ip_hlen = sizeof (struct ip); 3278 3279 if (mp->m_len < (ehdrlen + ip_hlen)) { 3280 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3281 ip = (struct ip *)buf; 3282 } 3283 3284 th = (struct tcphdr *)(ip + 1); 3285 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3286 break; 3287 3288 case ETHERTYPE_IPV6: 3289 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3290 3291 ip_hlen = sizeof(struct ip6_hdr); 3292 3293 if (mp->m_len < (ehdrlen + ip_hlen)) { 3294 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3295 buf); 3296 ip6 = (struct ip6_hdr *)buf; 3297 } 3298 th = (struct tcphdr *)(ip6 + 1); 3299 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3300 break; 3301 3302 default: 3303 break; 3304 } 3305 3306 return (offset); 3307 } 3308 3309 static __inline int 3310 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3311 uint32_t offset) 3312 { 3313 int i; 3314 uint32_t sum, nbds_in_hdr = 1; 3315 uint32_t window; 3316 bus_dma_segment_t *s_seg; 3317 3318 /* If the header spans mulitple segments, skip those segments */ 3319 3320 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3321 return (0); 3322 3323 i = 0; 3324 3325 while ((i < nsegs) && (offset >= segs->ds_len)) { 3326 offset = offset - segs->ds_len; 3327 segs++; 3328 i++; 3329 nbds_in_hdr++; 3330 } 3331 3332 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3333 3334 nsegs = nsegs - i; 3335 3336 while (nsegs >= window) { 3337 3338 sum = 0; 3339 s_seg = segs; 3340 3341 for (i = 0; i < window; i++){ 3342 sum += s_seg->ds_len; 3343 s_seg++; 3344 } 3345 3346 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3347 fp->tx_lso_wnd_min_len++; 3348 return (-1); 3349 } 3350 3351 nsegs = nsegs - 1; 3352 segs++; 3353 } 3354 3355 return (0); 3356 } 3357 3358 static int 3359 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3360 { 3361 bus_dma_segment_t *segs; 3362 bus_dmamap_t map = 0; 3363 uint32_t nsegs = 0; 3364 int ret = -1; 3365 struct mbuf *m_head = *m_headp; 3366 uint16_t idx = 0; 3367 uint16_t elem_left; 3368 3369 uint8_t nbd = 0; 3370 struct qlnx_tx_queue *txq; 3371 3372 struct eth_tx_1st_bd *first_bd; 3373 struct eth_tx_2nd_bd *second_bd; 3374 struct eth_tx_3rd_bd *third_bd; 3375 struct eth_tx_bd *tx_data_bd; 3376 3377 int seg_idx = 0; 3378 uint32_t nbds_in_hdr = 0; 3379 uint32_t offset = 0; 3380 3381 #ifdef QLNX_TRACE_PERF_DATA 3382 uint16_t bd_used; 3383 #endif 3384 3385 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3386 3387 if (!ha->link_up) 3388 return (-1); 3389 3390 first_bd = NULL; 3391 second_bd = NULL; 3392 third_bd = NULL; 3393 tx_data_bd = NULL; 3394 3395 txq = fp->txq[0]; 
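	/*
	 * Reject the frame early when the TX PBL chain is below the minimum
	 * free-element threshold; returning ENOBUFS lets the caller reap
	 * completions and retry.
	 */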
3396 3397 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3398 QLNX_TX_ELEM_MIN_THRESH) { 3399 3400 fp->tx_nsegs_gt_elem_left++; 3401 fp->err_tx_nsegs_gt_elem_left++; 3402 3403 return (ENOBUFS); 3404 } 3405 3406 idx = txq->sw_tx_prod; 3407 3408 map = txq->sw_tx_ring[idx].map; 3409 segs = txq->segs; 3410 3411 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3412 BUS_DMA_NOWAIT); 3413 3414 if (ha->dbg_trace_tso_pkt_len) { 3415 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3416 if (!fp->tx_tso_min_pkt_len) { 3417 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3418 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3419 } else { 3420 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3421 fp->tx_tso_min_pkt_len = 3422 m_head->m_pkthdr.len; 3423 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3424 fp->tx_tso_max_pkt_len = 3425 m_head->m_pkthdr.len; 3426 } 3427 } 3428 } 3429 3430 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3431 offset = qlnx_tcp_offset(ha, m_head); 3432 3433 if ((ret == EFBIG) || 3434 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3435 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3436 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3437 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3438 3439 struct mbuf *m; 3440 3441 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3442 3443 fp->tx_defrag++; 3444 3445 m = m_defrag(m_head, M_NOWAIT); 3446 if (m == NULL) { 3447 fp->err_tx_defrag++; 3448 fp->tx_pkts_freed++; 3449 m_freem(m_head); 3450 *m_headp = NULL; 3451 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3452 return (ENOBUFS); 3453 } 3454 3455 m_head = m; 3456 *m_headp = m_head; 3457 3458 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3459 segs, &nsegs, BUS_DMA_NOWAIT))) { 3460 3461 fp->err_tx_defrag_dmamap_load++; 3462 3463 QL_DPRINT1(ha, 3464 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3465 ret, m_head->m_pkthdr.len); 3466 3467 fp->tx_pkts_freed++; 3468 m_freem(m_head); 3469 *m_headp = NULL; 3470 3471 return (ret); 3472 } 3473 3474 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3475 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3476 3477 fp->err_tx_non_tso_max_seg++; 3478 3479 QL_DPRINT1(ha, 3480 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3481 ret, nsegs, m_head->m_pkthdr.len); 3482 3483 fp->tx_pkts_freed++; 3484 m_freem(m_head); 3485 *m_headp = NULL; 3486 3487 return (ret); 3488 } 3489 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3490 offset = qlnx_tcp_offset(ha, m_head); 3491 3492 } else if (ret) { 3493 3494 fp->err_tx_dmamap_load++; 3495 3496 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3497 ret, m_head->m_pkthdr.len); 3498 fp->tx_pkts_freed++; 3499 m_freem(m_head); 3500 *m_headp = NULL; 3501 return (ret); 3502 } 3503 3504 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3505 3506 if (ha->dbg_trace_tso_pkt_len) { 3507 if (nsegs < QLNX_FP_MAX_SEGS) 3508 fp->tx_pkts[(nsegs - 1)]++; 3509 else 3510 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3511 } 3512 3513 #ifdef QLNX_TRACE_PERF_DATA 3514 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3515 if(m_head->m_pkthdr.len <= 2048) 3516 fp->tx_pkts_hist[0]++; 3517 else if((m_head->m_pkthdr.len > 2048) && 3518 (m_head->m_pkthdr.len <= 4096)) 3519 fp->tx_pkts_hist[1]++; 3520 else if((m_head->m_pkthdr.len > 4096) && 3521 (m_head->m_pkthdr.len <= 8192)) 3522 fp->tx_pkts_hist[2]++; 3523 else if((m_head->m_pkthdr.len > 8192) && 3524 (m_head->m_pkthdr.len <= 12288 )) 3525 fp->tx_pkts_hist[3]++; 3526 else if((m_head->m_pkthdr.len > 11288) && 3527 (m_head->m_pkthdr.len <= 16394)) 
3528 fp->tx_pkts_hist[4]++; 3529 else if((m_head->m_pkthdr.len > 16384) && 3530 (m_head->m_pkthdr.len <= 20480)) 3531 fp->tx_pkts_hist[5]++; 3532 else if((m_head->m_pkthdr.len > 20480) && 3533 (m_head->m_pkthdr.len <= 24576)) 3534 fp->tx_pkts_hist[6]++; 3535 else if((m_head->m_pkthdr.len > 24576) && 3536 (m_head->m_pkthdr.len <= 28672)) 3537 fp->tx_pkts_hist[7]++; 3538 else if((m_head->m_pkthdr.len > 28762) && 3539 (m_head->m_pkthdr.len <= 32768)) 3540 fp->tx_pkts_hist[8]++; 3541 else if((m_head->m_pkthdr.len > 32768) && 3542 (m_head->m_pkthdr.len <= 36864)) 3543 fp->tx_pkts_hist[9]++; 3544 else if((m_head->m_pkthdr.len > 36864) && 3545 (m_head->m_pkthdr.len <= 40960)) 3546 fp->tx_pkts_hist[10]++; 3547 else if((m_head->m_pkthdr.len > 40960) && 3548 (m_head->m_pkthdr.len <= 45056)) 3549 fp->tx_pkts_hist[11]++; 3550 else if((m_head->m_pkthdr.len > 45056) && 3551 (m_head->m_pkthdr.len <= 49152)) 3552 fp->tx_pkts_hist[12]++; 3553 else if((m_head->m_pkthdr.len > 49512) && 3554 m_head->m_pkthdr.len <= 53248)) 3555 fp->tx_pkts_hist[13]++; 3556 else if((m_head->m_pkthdr.len > 53248) && 3557 (m_head->m_pkthdr.len <= 57344)) 3558 fp->tx_pkts_hist[14]++; 3559 else if((m_head->m_pkthdr.len > 53248) && 3560 (m_head->m_pkthdr.len <= 57344)) 3561 fp->tx_pkts_hist[15]++; 3562 else if((m_head->m_pkthdr.len > 57344) && 3563 (m_head->m_pkthdr.len <= 61440)) 3564 fp->tx_pkts_hist[16]++; 3565 else 3566 fp->tx_pkts_hist[17]++; 3567 } 3568 3569 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3570 3571 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3572 bd_used = TX_RING_SIZE - elem_left; 3573 3574 if(bd_used <= 100) 3575 fp->tx_pkts_q[0]++; 3576 else if((bd_used > 100) && (bd_used <= 500)) 3577 fp->tx_pkts_q[1]++; 3578 else if((bd_used > 500) && (bd_used <= 1000)) 3579 fp->tx_pkts_q[2]++; 3580 else if((bd_used > 1000) && (bd_used <= 2000)) 3581 fp->tx_pkts_q[3]++; 3582 else if((bd_used > 3000) && (bd_used <= 4000)) 3583 fp->tx_pkts_q[4]++; 3584 else if((bd_used > 4000) && (bd_used <= 5000)) 3585 fp->tx_pkts_q[5]++; 3586 else if((bd_used > 6000) && (bd_used <= 7000)) 3587 fp->tx_pkts_q[6]++; 3588 else if((bd_used > 7000) && (bd_used <= 8000)) 3589 fp->tx_pkts_q[7]++; 3590 else if((bd_used > 8000) && (bd_used <= 9000)) 3591 fp->tx_pkts_q[8]++; 3592 else if((bd_used > 9000) && (bd_used <= 10000)) 3593 fp->tx_pkts_q[9]++; 3594 else if((bd_used > 10000) && (bd_used <= 11000)) 3595 fp->tx_pkts_q[10]++; 3596 else if((bd_used > 11000) && (bd_used <= 12000)) 3597 fp->tx_pkts_q[11]++; 3598 else if((bd_used > 12000) && (bd_used <= 13000)) 3599 fp->tx_pkts_q[12]++; 3600 else if((bd_used > 13000) && (bd_used <= 14000)) 3601 fp->tx_pkts_q[13]++; 3602 else if((bd_used > 14000) && (bd_used <= 15000)) 3603 fp->tx_pkts_q[14]++; 3604 else if((bd_used > 15000) && (bd_used <= 16000)) 3605 fp->tx_pkts_q[15]++; 3606 else 3607 fp->tx_pkts_q[16]++; 3608 } 3609 3610 #endif /* end of QLNX_TRACE_PERF_DATA */ 3611 3612 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3613 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3614 3615 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs" 3616 " in chain[%d] trying to free packets\n", 3617 nsegs, elem_left, fp->rss_id); 3618 3619 fp->tx_nsegs_gt_elem_left++; 3620 3621 (void)qlnx_tx_int(ha, fp, txq); 3622 3623 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3624 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3625 3626 QL_DPRINT1(ha, 3627 "(%d, 0x%x) insuffient BDs in chain[%d]\n", 3628 nsegs, elem_left, fp->rss_id); 3629 3630 fp->err_tx_nsegs_gt_elem_left++; 3631 fp->tx_ring_full = 1; 3632 if 
(ha->storm_stats_enable) 3633 ha->storm_stats_gather = 1; 3634 return (ENOBUFS); 3635 } 3636 } 3637 3638 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3639 3640 txq->sw_tx_ring[idx].mp = m_head; 3641 3642 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3643 3644 memset(first_bd, 0, sizeof(*first_bd)); 3645 3646 first_bd->data.bd_flags.bitfields = 3647 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3648 3649 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3650 3651 nbd++; 3652 3653 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3654 first_bd->data.bd_flags.bitfields |= 3655 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3656 } 3657 3658 if (m_head->m_pkthdr.csum_flags & 3659 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3660 first_bd->data.bd_flags.bitfields |= 3661 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3662 } 3663 3664 if (m_head->m_flags & M_VLANTAG) { 3665 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3666 first_bd->data.bd_flags.bitfields |= 3667 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3668 } 3669 3670 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3671 3672 first_bd->data.bd_flags.bitfields |= 3673 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3674 first_bd->data.bd_flags.bitfields |= 3675 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3676 3677 nbds_in_hdr = 1; 3678 3679 if (offset == segs->ds_len) { 3680 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3681 segs++; 3682 seg_idx++; 3683 3684 second_bd = (struct eth_tx_2nd_bd *) 3685 ecore_chain_produce(&txq->tx_pbl); 3686 memset(second_bd, 0, sizeof(*second_bd)); 3687 nbd++; 3688 3689 if (seg_idx < nsegs) { 3690 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3691 (segs->ds_addr), (segs->ds_len)); 3692 segs++; 3693 seg_idx++; 3694 } 3695 3696 third_bd = (struct eth_tx_3rd_bd *) 3697 ecore_chain_produce(&txq->tx_pbl); 3698 memset(third_bd, 0, sizeof(*third_bd)); 3699 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3700 third_bd->data.bitfields |= 3701 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3702 nbd++; 3703 3704 if (seg_idx < nsegs) { 3705 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3706 (segs->ds_addr), (segs->ds_len)); 3707 segs++; 3708 seg_idx++; 3709 } 3710 3711 for (; seg_idx < nsegs; seg_idx++) { 3712 tx_data_bd = (struct eth_tx_bd *) 3713 ecore_chain_produce(&txq->tx_pbl); 3714 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3715 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3716 segs->ds_addr,\ 3717 segs->ds_len); 3718 segs++; 3719 nbd++; 3720 } 3721 3722 } else if (offset < segs->ds_len) { 3723 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3724 3725 second_bd = (struct eth_tx_2nd_bd *) 3726 ecore_chain_produce(&txq->tx_pbl); 3727 memset(second_bd, 0, sizeof(*second_bd)); 3728 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3729 (segs->ds_addr + offset),\ 3730 (segs->ds_len - offset)); 3731 nbd++; 3732 segs++; 3733 3734 third_bd = (struct eth_tx_3rd_bd *) 3735 ecore_chain_produce(&txq->tx_pbl); 3736 memset(third_bd, 0, sizeof(*third_bd)); 3737 3738 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3739 segs->ds_addr,\ 3740 segs->ds_len); 3741 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3742 third_bd->data.bitfields |= 3743 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3744 segs++; 3745 nbd++; 3746 3747 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3748 tx_data_bd = (struct eth_tx_bd *) 3749 ecore_chain_produce(&txq->tx_pbl); 3750 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3751 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3752 segs->ds_addr,\ 3753 segs->ds_len); 3754 segs++; 3755 nbd++; 3756 } 3757 3758 } else 
{ 3759 offset = offset - segs->ds_len; 3760 segs++; 3761 3762 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3763 3764 if (offset) 3765 nbds_in_hdr++; 3766 3767 tx_data_bd = (struct eth_tx_bd *) 3768 ecore_chain_produce(&txq->tx_pbl); 3769 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3770 3771 if (second_bd == NULL) { 3772 second_bd = (struct eth_tx_2nd_bd *) 3773 tx_data_bd; 3774 } else if (third_bd == NULL) { 3775 third_bd = (struct eth_tx_3rd_bd *) 3776 tx_data_bd; 3777 } 3778 3779 if (offset && (offset < segs->ds_len)) { 3780 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3781 segs->ds_addr, offset); 3782 3783 tx_data_bd = (struct eth_tx_bd *) 3784 ecore_chain_produce(&txq->tx_pbl); 3785 3786 memset(tx_data_bd, 0, 3787 sizeof(*tx_data_bd)); 3788 3789 if (second_bd == NULL) { 3790 second_bd = 3791 (struct eth_tx_2nd_bd *)tx_data_bd; 3792 } else if (third_bd == NULL) { 3793 third_bd = 3794 (struct eth_tx_3rd_bd *)tx_data_bd; 3795 } 3796 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3797 (segs->ds_addr + offset), \ 3798 (segs->ds_len - offset)); 3799 nbd++; 3800 offset = 0; 3801 } else { 3802 if (offset) 3803 offset = offset - segs->ds_len; 3804 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3805 segs->ds_addr, segs->ds_len); 3806 } 3807 segs++; 3808 nbd++; 3809 } 3810 3811 if (third_bd == NULL) { 3812 third_bd = (struct eth_tx_3rd_bd *) 3813 ecore_chain_produce(&txq->tx_pbl); 3814 memset(third_bd, 0, sizeof(*third_bd)); 3815 } 3816 3817 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3818 third_bd->data.bitfields |= 3819 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3820 } 3821 fp->tx_tso_pkts++; 3822 } else { 3823 segs++; 3824 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3825 tx_data_bd = (struct eth_tx_bd *) 3826 ecore_chain_produce(&txq->tx_pbl); 3827 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3828 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3829 segs->ds_len); 3830 segs++; 3831 nbd++; 3832 } 3833 first_bd->data.bitfields = 3834 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3835 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3836 first_bd->data.bitfields = 3837 htole16(first_bd->data.bitfields); 3838 fp->tx_non_tso_pkts++; 3839 } 3840 3841 3842 first_bd->data.nbds = nbd; 3843 3844 if (ha->dbg_trace_tso_pkt_len) { 3845 if (fp->tx_tso_max_nsegs < nsegs) 3846 fp->tx_tso_max_nsegs = nsegs; 3847 3848 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3849 fp->tx_tso_min_nsegs = nsegs; 3850 } 3851 3852 txq->sw_tx_ring[idx].nsegs = nsegs; 3853 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3854 3855 txq->tx_db.data.bd_prod = 3856 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3857 3858 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3859 3860 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3861 return (0); 3862 } 3863 3864 static void 3865 qlnx_stop(qlnx_host_t *ha) 3866 { 3867 struct ifnet *ifp = ha->ifp; 3868 device_t dev; 3869 int i; 3870 3871 dev = ha->pci_dev; 3872 3873 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3874 3875 /* 3876 * We simply lock and unlock each fp->tx_mtx to 3877 * propagate the if_drv_flags 3878 * state to each tx thread 3879 */ 3880 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3881 3882 if (ha->state == QLNX_STATE_OPEN) { 3883 for (i = 0; i < ha->num_rss; i++) { 3884 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3885 3886 mtx_lock(&fp->tx_mtx); 3887 mtx_unlock(&fp->tx_mtx); 3888 3889 if (fp->fp_taskqueue != NULL) 3890 taskqueue_enqueue(fp->fp_taskqueue, 3891 &fp->fp_task); 3892 } 3893 } 3894 #ifdef QLNX_ENABLE_IWARP 3895 if 
(qlnx_vf_device(ha) != 0) { 3896 qlnx_rdma_dev_close(ha); 3897 } 3898 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3899 3900 qlnx_unload(ha); 3901 3902 return; 3903 } 3904 3905 static int 3906 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3907 { 3908 return(TX_RING_SIZE - 1); 3909 } 3910 3911 uint8_t * 3912 qlnx_get_mac_addr(qlnx_host_t *ha) 3913 { 3914 struct ecore_hwfn *p_hwfn; 3915 unsigned char mac[ETHER_ADDR_LEN]; 3916 uint8_t p_is_forced; 3917 3918 p_hwfn = &ha->cdev.hwfns[0]; 3919 3920 if (qlnx_vf_device(ha) != 0) 3921 return (p_hwfn->hw_info.hw_mac_addr); 3922 3923 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3924 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3925 true) { 3926 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3927 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3928 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3929 memcpy(ha->primary_mac, mac, ETH_ALEN); 3930 } 3931 3932 return (ha->primary_mac); 3933 } 3934 3935 static uint32_t 3936 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3937 { 3938 uint32_t ifm_type = 0; 3939 3940 switch (if_link->media_type) { 3941 3942 case MEDIA_MODULE_FIBER: 3943 case MEDIA_UNSPECIFIED: 3944 if (if_link->speed == (100 * 1000)) 3945 ifm_type = QLNX_IFM_100G_SR4; 3946 else if (if_link->speed == (40 * 1000)) 3947 ifm_type = IFM_40G_SR4; 3948 else if (if_link->speed == (25 * 1000)) 3949 ifm_type = QLNX_IFM_25G_SR; 3950 else if (if_link->speed == (10 * 1000)) 3951 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3952 else if (if_link->speed == (1 * 1000)) 3953 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3954 3955 break; 3956 3957 case MEDIA_DA_TWINAX: 3958 if (if_link->speed == (100 * 1000)) 3959 ifm_type = QLNX_IFM_100G_CR4; 3960 else if (if_link->speed == (40 * 1000)) 3961 ifm_type = IFM_40G_CR4; 3962 else if (if_link->speed == (25 * 1000)) 3963 ifm_type = QLNX_IFM_25G_CR; 3964 else if (if_link->speed == (10 * 1000)) 3965 ifm_type = IFM_10G_TWINAX; 3966 3967 break; 3968 3969 default : 3970 ifm_type = IFM_UNKNOWN; 3971 break; 3972 } 3973 return (ifm_type); 3974 } 3975 3976 3977 3978 /***************************************************************************** 3979 * Interrupt Service Functions 3980 *****************************************************************************/ 3981 3982 static int 3983 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3984 struct mbuf *mp_head, uint16_t len) 3985 { 3986 struct mbuf *mp, *mpf, *mpl; 3987 struct sw_rx_data *sw_rx_data; 3988 struct qlnx_rx_queue *rxq; 3989 uint16_t len_in_buffer; 3990 3991 rxq = fp->rxq; 3992 mpf = mpl = mp = NULL; 3993 3994 while (len) { 3995 3996 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3997 3998 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3999 mp = sw_rx_data->data; 4000 4001 if (mp == NULL) { 4002 QL_DPRINT1(ha, "mp = NULL\n"); 4003 fp->err_rx_mp_null++; 4004 rxq->sw_rx_cons = 4005 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4006 4007 if (mpf != NULL) 4008 m_freem(mpf); 4009 4010 return (-1); 4011 } 4012 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4013 BUS_DMASYNC_POSTREAD); 4014 4015 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4016 4017 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4018 " incoming packet and reusing its buffer\n"); 4019 4020 qlnx_reuse_rx_data(rxq); 4021 fp->err_rx_alloc_errors++; 4022 4023 if (mpf != NULL) 4024 m_freem(mpf); 4025 4026 return (-1); 4027 } 4028 ecore_chain_consume(&rxq->rx_bd_ring); 4029 4030 if (len > rxq->rx_buf_size) 4031 len_in_buffer = 
rxq->rx_buf_size; 4032 else 4033 len_in_buffer = len; 4034 4035 len = len - len_in_buffer; 4036 4037 mp->m_flags &= ~M_PKTHDR; 4038 mp->m_next = NULL; 4039 mp->m_len = len_in_buffer; 4040 4041 if (mpf == NULL) 4042 mpf = mpl = mp; 4043 else { 4044 mpl->m_next = mp; 4045 mpl = mp; 4046 } 4047 } 4048 4049 if (mpf != NULL) 4050 mp_head->m_next = mpf; 4051 4052 return (0); 4053 } 4054 4055 static void 4056 qlnx_tpa_start(qlnx_host_t *ha, 4057 struct qlnx_fastpath *fp, 4058 struct qlnx_rx_queue *rxq, 4059 struct eth_fast_path_rx_tpa_start_cqe *cqe) 4060 { 4061 uint32_t agg_index; 4062 struct ifnet *ifp = ha->ifp; 4063 struct mbuf *mp; 4064 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4065 struct sw_rx_data *sw_rx_data; 4066 dma_addr_t addr; 4067 bus_dmamap_t map; 4068 struct eth_rx_bd *rx_bd; 4069 int i; 4070 device_t dev; 4071 #if __FreeBSD_version >= 1100000 4072 uint8_t hash_type; 4073 #endif /* #if __FreeBSD_version >= 1100000 */ 4074 4075 dev = ha->pci_dev; 4076 agg_index = cqe->tpa_agg_index; 4077 4078 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 4079 \t type = 0x%x\n \ 4080 \t bitfields = 0x%x\n \ 4081 \t seg_len = 0x%x\n \ 4082 \t pars_flags = 0x%x\n \ 4083 \t vlan_tag = 0x%x\n \ 4084 \t rss_hash = 0x%x\n \ 4085 \t len_on_first_bd = 0x%x\n \ 4086 \t placement_offset = 0x%x\n \ 4087 \t tpa_agg_index = 0x%x\n \ 4088 \t header_len = 0x%x\n \ 4089 \t ext_bd_len_list[0] = 0x%x\n \ 4090 \t ext_bd_len_list[1] = 0x%x\n \ 4091 \t ext_bd_len_list[2] = 0x%x\n \ 4092 \t ext_bd_len_list[3] = 0x%x\n \ 4093 \t ext_bd_len_list[4] = 0x%x\n", 4094 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 4095 cqe->pars_flags.flags, cqe->vlan_tag, 4096 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 4097 cqe->tpa_agg_index, cqe->header_len, 4098 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 4099 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 4100 cqe->ext_bd_len_list[4]); 4101 4102 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4103 fp->err_rx_tpa_invalid_agg_num++; 4104 return; 4105 } 4106 4107 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4108 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 4109 mp = sw_rx_data->data; 4110 4111 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 4112 4113 if (mp == NULL) { 4114 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 4115 fp->err_rx_mp_null++; 4116 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4117 4118 return; 4119 } 4120 4121 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 4122 4123 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 4124 " flags = %x, dropping incoming packet\n", fp->rss_id, 4125 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 4126 4127 fp->err_rx_hw_errors++; 4128 4129 qlnx_reuse_rx_data(rxq); 4130 4131 QLNX_INC_IERRORS(ifp); 4132 4133 return; 4134 } 4135 4136 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4137 4138 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4139 " dropping incoming packet and reusing its buffer\n", 4140 fp->rss_id); 4141 4142 fp->err_rx_alloc_errors++; 4143 QLNX_INC_IQDROPS(ifp); 4144 4145 /* 4146 * Load the tpa mbuf into the rx ring and save the 4147 * posted mbuf 4148 */ 4149 4150 map = sw_rx_data->map; 4151 addr = sw_rx_data->dma_addr; 4152 4153 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4154 4155 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4156 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4157 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4158 4159 rxq->tpa_info[agg_index].rx_buf.data = mp; 4160 
rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4161 rxq->tpa_info[agg_index].rx_buf.map = map; 4162 4163 rx_bd = (struct eth_rx_bd *) 4164 ecore_chain_produce(&rxq->rx_bd_ring); 4165 4166 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4167 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4168 4169 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4170 BUS_DMASYNC_PREREAD); 4171 4172 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4173 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4174 4175 ecore_chain_consume(&rxq->rx_bd_ring); 4176 4177 /* Now reuse any buffers posted in ext_bd_len_list */ 4178 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4179 4180 if (cqe->ext_bd_len_list[i] == 0) 4181 break; 4182 4183 qlnx_reuse_rx_data(rxq); 4184 } 4185 4186 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4187 return; 4188 } 4189 4190 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4191 4192 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4193 " dropping incoming packet and reusing its buffer\n", 4194 fp->rss_id); 4195 4196 QLNX_INC_IQDROPS(ifp); 4197 4198 /* if we already have mbuf head in aggregation free it */ 4199 if (rxq->tpa_info[agg_index].mpf) { 4200 m_freem(rxq->tpa_info[agg_index].mpf); 4201 rxq->tpa_info[agg_index].mpl = NULL; 4202 } 4203 rxq->tpa_info[agg_index].mpf = mp; 4204 rxq->tpa_info[agg_index].mpl = NULL; 4205 4206 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4207 ecore_chain_consume(&rxq->rx_bd_ring); 4208 4209 /* Now reuse any buffers posted in ext_bd_len_list */ 4210 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4211 4212 if (cqe->ext_bd_len_list[i] == 0) 4213 break; 4214 4215 qlnx_reuse_rx_data(rxq); 4216 } 4217 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4218 4219 return; 4220 } 4221 4222 /* 4223 * first process the ext_bd_len_list 4224 * if this fails then we simply drop the packet 4225 */ 4226 ecore_chain_consume(&rxq->rx_bd_ring); 4227 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4228 4229 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4230 4231 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4232 4233 if (cqe->ext_bd_len_list[i] == 0) 4234 break; 4235 4236 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4237 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4238 BUS_DMASYNC_POSTREAD); 4239 4240 mpc = sw_rx_data->data; 4241 4242 if (mpc == NULL) { 4243 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4244 fp->err_rx_mp_null++; 4245 if (mpf != NULL) 4246 m_freem(mpf); 4247 mpf = mpl = NULL; 4248 rxq->tpa_info[agg_index].agg_state = 4249 QLNX_AGG_STATE_ERROR; 4250 ecore_chain_consume(&rxq->rx_bd_ring); 4251 rxq->sw_rx_cons = 4252 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4253 continue; 4254 } 4255 4256 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4257 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4258 " dropping incoming packet and reusing its" 4259 " buffer\n", fp->rss_id); 4260 4261 qlnx_reuse_rx_data(rxq); 4262 4263 if (mpf != NULL) 4264 m_freem(mpf); 4265 mpf = mpl = NULL; 4266 4267 rxq->tpa_info[agg_index].agg_state = 4268 QLNX_AGG_STATE_ERROR; 4269 4270 ecore_chain_consume(&rxq->rx_bd_ring); 4271 rxq->sw_rx_cons = 4272 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4273 4274 continue; 4275 } 4276 4277 mpc->m_flags &= ~M_PKTHDR; 4278 mpc->m_next = NULL; 4279 mpc->m_len = cqe->ext_bd_len_list[i]; 4280 4281 4282 if (mpf == NULL) { 4283 mpf = mpl = mpc; 4284 } else { 4285 mpl->m_len = ha->rx_buf_size; 4286 mpl->m_next = mpc; 4287 
mpl = mpc; 4288 } 4289 4290 ecore_chain_consume(&rxq->rx_bd_ring); 4291 rxq->sw_rx_cons = 4292 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4293 } 4294 4295 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4296 4297 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4298 " incoming packet and reusing its buffer\n", 4299 fp->rss_id); 4300 4301 QLNX_INC_IQDROPS(ifp); 4302 4303 rxq->tpa_info[agg_index].mpf = mp; 4304 rxq->tpa_info[agg_index].mpl = NULL; 4305 4306 return; 4307 } 4308 4309 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 4310 4311 if (mpf != NULL) { 4312 mp->m_len = ha->rx_buf_size; 4313 mp->m_next = mpf; 4314 rxq->tpa_info[agg_index].mpf = mp; 4315 rxq->tpa_info[agg_index].mpl = mpl; 4316 } else { 4317 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4318 rxq->tpa_info[agg_index].mpf = mp; 4319 rxq->tpa_info[agg_index].mpl = mp; 4320 mp->m_next = NULL; 4321 } 4322 4323 mp->m_flags |= M_PKTHDR; 4324 4325 /* assign packet to this interface */ 4326 mp->m_pkthdr.rcvif = ifp; 4327 4328 /* assume no hardware checksum has completed */ 4329 mp->m_pkthdr.csum_flags = 0; 4330 4331 //mp->m_pkthdr.flowid = fp->rss_id; 4332 mp->m_pkthdr.flowid = cqe->rss_hash; 4333 4334 #if __FreeBSD_version >= 1100000 4335 4336 hash_type = cqe->bitfields & 4337 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4338 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4339 4340 switch (hash_type) { 4341 4342 case RSS_HASH_TYPE_IPV4: 4343 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4344 break; 4345 4346 case RSS_HASH_TYPE_TCP_IPV4: 4347 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4348 break; 4349 4350 case RSS_HASH_TYPE_IPV6: 4351 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4352 break; 4353 4354 case RSS_HASH_TYPE_TCP_IPV6: 4355 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4356 break; 4357 4358 default: 4359 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4360 break; 4361 } 4362 4363 #else 4364 mp->m_flags |= M_FLOWID; 4365 #endif 4366 4367 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4368 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4369 4370 mp->m_pkthdr.csum_data = 0xFFFF; 4371 4372 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4373 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4374 mp->m_flags |= M_VLANTAG; 4375 } 4376 4377 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4378 4379 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4380 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4381 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4382 4383 return; 4384 } 4385 4386 static void 4387 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4388 struct qlnx_rx_queue *rxq, 4389 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4390 { 4391 struct sw_rx_data *sw_rx_data; 4392 int i; 4393 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4394 struct mbuf *mp; 4395 uint32_t agg_index; 4396 device_t dev; 4397 4398 dev = ha->pci_dev; 4399 4400 QL_DPRINT7(ha, "[%d]: enter\n \ 4401 \t type = 0x%x\n \ 4402 \t tpa_agg_index = 0x%x\n \ 4403 \t len_list[0] = 0x%x\n \ 4404 \t len_list[1] = 0x%x\n \ 4405 \t len_list[2] = 0x%x\n \ 4406 \t len_list[3] = 0x%x\n \ 4407 \t len_list[4] = 0x%x\n \ 4408 \t len_list[5] = 0x%x\n", 4409 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4410 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4411 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4412 4413 agg_index = cqe->tpa_agg_index; 4414 4415 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4416 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4417
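	/*
	 * The firmware reported an aggregation index outside the
	 * ETH_TPA_MAX_AGGS_NUM window; count the event and ignore this
	 * continuation CQE.
	 */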
fp->err_rx_tpa_invalid_agg_num++; 4418 return; 4419 } 4420 4421 4422 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4423 4424 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4425 4426 if (cqe->len_list[i] == 0) 4427 break; 4428 4429 if (rxq->tpa_info[agg_index].agg_state != 4430 QLNX_AGG_STATE_START) { 4431 qlnx_reuse_rx_data(rxq); 4432 continue; 4433 } 4434 4435 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4436 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4437 BUS_DMASYNC_POSTREAD); 4438 4439 mpc = sw_rx_data->data; 4440 4441 if (mpc == NULL) { 4442 4443 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4444 4445 fp->err_rx_mp_null++; 4446 if (mpf != NULL) 4447 m_freem(mpf); 4448 mpf = mpl = NULL; 4449 rxq->tpa_info[agg_index].agg_state = 4450 QLNX_AGG_STATE_ERROR; 4451 ecore_chain_consume(&rxq->rx_bd_ring); 4452 rxq->sw_rx_cons = 4453 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4454 continue; 4455 } 4456 4457 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4458 4459 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4460 " dropping incoming packet and reusing its" 4461 " buffer\n", fp->rss_id); 4462 4463 qlnx_reuse_rx_data(rxq); 4464 4465 if (mpf != NULL) 4466 m_freem(mpf); 4467 mpf = mpl = NULL; 4468 4469 rxq->tpa_info[agg_index].agg_state = 4470 QLNX_AGG_STATE_ERROR; 4471 4472 ecore_chain_consume(&rxq->rx_bd_ring); 4473 rxq->sw_rx_cons = 4474 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4475 4476 continue; 4477 } 4478 4479 mpc->m_flags &= ~M_PKTHDR; 4480 mpc->m_next = NULL; 4481 mpc->m_len = cqe->len_list[i]; 4482 4483 4484 if (mpf == NULL) { 4485 mpf = mpl = mpc; 4486 } else { 4487 mpl->m_len = ha->rx_buf_size; 4488 mpl->m_next = mpc; 4489 mpl = mpc; 4490 } 4491 4492 ecore_chain_consume(&rxq->rx_bd_ring); 4493 rxq->sw_rx_cons = 4494 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4495 } 4496 4497 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4498 fp->rss_id, mpf, mpl); 4499 4500 if (mpf != NULL) { 4501 mp = rxq->tpa_info[agg_index].mpl; 4502 mp->m_len = ha->rx_buf_size; 4503 mp->m_next = mpf; 4504 rxq->tpa_info[agg_index].mpl = mpl; 4505 } 4506 4507 return; 4508 } 4509 4510 static int 4511 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4512 struct qlnx_rx_queue *rxq, 4513 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4514 { 4515 struct sw_rx_data *sw_rx_data; 4516 int i; 4517 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4518 struct mbuf *mp; 4519 uint32_t agg_index; 4520 uint32_t len = 0; 4521 struct ifnet *ifp = ha->ifp; 4522 device_t dev; 4523 4524 dev = ha->pci_dev; 4525 4526 QL_DPRINT7(ha, "[%d]: enter\n \ 4527 \t type = 0x%x\n \ 4528 \t tpa_agg_index = 0x%x\n \ 4529 \t total_packet_len = 0x%x\n \ 4530 \t num_of_bds = 0x%x\n \ 4531 \t end_reason = 0x%x\n \ 4532 \t num_of_coalesced_segs = 0x%x\n \ 4533 \t ts_delta = 0x%x\n \ 4534 \t len_list[0] = 0x%x\n \ 4535 \t len_list[1] = 0x%x\n \ 4536 \t len_list[2] = 0x%x\n \ 4537 \t len_list[3] = 0x%x\n", 4538 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4539 cqe->total_packet_len, cqe->num_of_bds, 4540 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4541 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4542 cqe->len_list[3]); 4543 4544 agg_index = cqe->tpa_agg_index; 4545 4546 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4547 4548 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4549 4550 fp->err_rx_tpa_invalid_agg_num++; 4551 return (0); 4552 } 4553 4554 4555 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4556 4557 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4558 4559 if (cqe->len_list[i] == 0) 4560 break; 4561 4562 
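	/*
	 * Each non-zero len_list[] entry corresponds to one BD still posted for
	 * this aggregation. If the aggregation has already failed, the BD is
	 * simply recycled; otherwise the cluster is pulled from the sw ring, a
	 * replacement buffer is posted, and the data is chained onto the local
	 * mpf/mpl list below.
	 */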
if (rxq->tpa_info[agg_index].agg_state != 4563 QLNX_AGG_STATE_START) { 4564 4565 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4566 4567 qlnx_reuse_rx_data(rxq); 4568 continue; 4569 } 4570 4571 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4572 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4573 BUS_DMASYNC_POSTREAD); 4574 4575 mpc = sw_rx_data->data; 4576 4577 if (mpc == NULL) { 4578 4579 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4580 4581 fp->err_rx_mp_null++; 4582 if (mpf != NULL) 4583 m_freem(mpf); 4584 mpf = mpl = NULL; 4585 rxq->tpa_info[agg_index].agg_state = 4586 QLNX_AGG_STATE_ERROR; 4587 ecore_chain_consume(&rxq->rx_bd_ring); 4588 rxq->sw_rx_cons = 4589 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4590 continue; 4591 } 4592 4593 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4594 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4595 " dropping incoming packet and reusing its" 4596 " buffer\n", fp->rss_id); 4597 4598 qlnx_reuse_rx_data(rxq); 4599 4600 if (mpf != NULL) 4601 m_freem(mpf); 4602 mpf = mpl = NULL; 4603 4604 rxq->tpa_info[agg_index].agg_state = 4605 QLNX_AGG_STATE_ERROR; 4606 4607 ecore_chain_consume(&rxq->rx_bd_ring); 4608 rxq->sw_rx_cons = 4609 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4610 4611 continue; 4612 } 4613 4614 mpc->m_flags &= ~M_PKTHDR; 4615 mpc->m_next = NULL; 4616 mpc->m_len = cqe->len_list[i]; 4617 4618 4619 if (mpf == NULL) { 4620 mpf = mpl = mpc; 4621 } else { 4622 mpl->m_len = ha->rx_buf_size; 4623 mpl->m_next = mpc; 4624 mpl = mpc; 4625 } 4626 4627 ecore_chain_consume(&rxq->rx_bd_ring); 4628 rxq->sw_rx_cons = 4629 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4630 } 4631 4632 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4633 4634 if (mpf != NULL) { 4635 4636 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4637 4638 mp = rxq->tpa_info[agg_index].mpl; 4639 mp->m_len = ha->rx_buf_size; 4640 mp->m_next = mpf; 4641 } 4642 4643 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4644 4645 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4646 4647 if (rxq->tpa_info[agg_index].mpf != NULL) 4648 m_freem(rxq->tpa_info[agg_index].mpf); 4649 rxq->tpa_info[agg_index].mpf = NULL; 4650 rxq->tpa_info[agg_index].mpl = NULL; 4651 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4652 return (0); 4653 } 4654 4655 mp = rxq->tpa_info[agg_index].mpf; 4656 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4657 mp->m_pkthdr.len = cqe->total_packet_len; 4658 4659 if (mp->m_next == NULL) 4660 mp->m_len = mp->m_pkthdr.len; 4661 else { 4662 /* compute the total packet length */ 4663 mpf = mp; 4664 while (mpf != NULL) { 4665 len += mpf->m_len; 4666 mpf = mpf->m_next; 4667 } 4668 4669 if (cqe->total_packet_len > len) { 4670 mpl = rxq->tpa_info[agg_index].mpl; 4671 mpl->m_len += (cqe->total_packet_len - len); 4672 } 4673 } 4674 4675 QLNX_INC_IPACKETS(ifp); 4676 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4677 4678 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4679 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4680 fp->rss_id, mp->m_pkthdr.csum_data, 4681 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4682 4683 (*ifp->if_input)(ifp, mp); 4684 4685 rxq->tpa_info[agg_index].mpf = NULL; 4686 rxq->tpa_info[agg_index].mpl = NULL; 4687 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4688 4689 return (cqe->num_of_coalesced_segs); 4690 } 4691 4692 static int 4693 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4694 int lro_enable) 4695 { 4696 uint16_t hw_comp_cons, sw_comp_cons; 4697 int rx_pkt = 0; 4698 struct 
qlnx_rx_queue *rxq = fp->rxq; 4699 struct ifnet *ifp = ha->ifp; 4700 struct ecore_dev *cdev = &ha->cdev; 4701 struct ecore_hwfn *p_hwfn; 4702 4703 #ifdef QLNX_SOFT_LRO 4704 struct lro_ctrl *lro; 4705 4706 lro = &rxq->lro; 4707 #endif /* #ifdef QLNX_SOFT_LRO */ 4708 4709 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4710 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4711 4712 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4713 4714 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4715 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4716 * read before it is written by FW, then FW writes CQE and SB, and then 4717 * the CPU reads the hw_comp_cons, it will use an old CQE. 4718 */ 4719 4720 /* Loop to complete all indicated BDs */ 4721 while (sw_comp_cons != hw_comp_cons) { 4722 union eth_rx_cqe *cqe; 4723 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4724 struct sw_rx_data *sw_rx_data; 4725 register struct mbuf *mp; 4726 enum eth_rx_cqe_type cqe_type; 4727 uint16_t len, pad, len_on_first_bd; 4728 uint8_t *data; 4729 #if __FreeBSD_version >= 1100000 4730 uint8_t hash_type; 4731 #endif /* #if __FreeBSD_version >= 1100000 */ 4732 4733 /* Get the CQE from the completion ring */ 4734 cqe = (union eth_rx_cqe *) 4735 ecore_chain_consume(&rxq->rx_comp_ring); 4736 cqe_type = cqe->fast_path_regular.type; 4737 4738 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4739 QL_DPRINT3(ha, "Got a slowath CQE\n"); 4740 4741 ecore_eth_cqe_completion(p_hwfn, 4742 (struct eth_slow_path_rx_cqe *)cqe); 4743 goto next_cqe; 4744 } 4745 4746 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4747 4748 switch (cqe_type) { 4749 4750 case ETH_RX_CQE_TYPE_TPA_START: 4751 qlnx_tpa_start(ha, fp, rxq, 4752 &cqe->fast_path_tpa_start); 4753 fp->tpa_start++; 4754 break; 4755 4756 case ETH_RX_CQE_TYPE_TPA_CONT: 4757 qlnx_tpa_cont(ha, fp, rxq, 4758 &cqe->fast_path_tpa_cont); 4759 fp->tpa_cont++; 4760 break; 4761 4762 case ETH_RX_CQE_TYPE_TPA_END: 4763 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4764 &cqe->fast_path_tpa_end); 4765 fp->tpa_end++; 4766 break; 4767 4768 default: 4769 break; 4770 } 4771 4772 goto next_cqe; 4773 } 4774 4775 /* Get the data from the SW ring */ 4776 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4777 mp = sw_rx_data->data; 4778 4779 if (mp == NULL) { 4780 QL_DPRINT1(ha, "mp = NULL\n"); 4781 fp->err_rx_mp_null++; 4782 rxq->sw_rx_cons = 4783 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4784 goto next_cqe; 4785 } 4786 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4787 BUS_DMASYNC_POSTREAD); 4788 4789 /* non GRO */ 4790 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4791 len = le16toh(fp_cqe->pkt_len); 4792 pad = fp_cqe->placement_offset; 4793 #if 0 4794 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4795 " len %u, parsing flags = %d pad = %d\n", 4796 cqe_type, fp_cqe->bitfields, 4797 le16toh(fp_cqe->vlan_tag), 4798 len, le16toh(fp_cqe->pars_flags.flags), pad); 4799 #endif 4800 data = mtod(mp, uint8_t *); 4801 data = data + pad; 4802 4803 if (0) 4804 qlnx_dump_buf8(ha, __func__, data, len); 4805 4806 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4807 * is always with a fixed size. If allocation fails, we take the 4808 * consumed BD and return it to the ring in the PROD position. 4809 * The packet that was received on that BD will be dropped (and 4810 * not passed to the upper stack). 
4811 */ 4812 /* If this is an error packet then drop it */ 4813 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4814 CQE_FLAGS_ERR) { 4815 4816 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4817 " dropping incoming packet\n", sw_comp_cons, 4818 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4819 fp->err_rx_hw_errors++; 4820 4821 qlnx_reuse_rx_data(rxq); 4822 4823 QLNX_INC_IERRORS(ifp); 4824 4825 goto next_cqe; 4826 } 4827 4828 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4829 4830 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4831 " incoming packet and reusing its buffer\n"); 4832 qlnx_reuse_rx_data(rxq); 4833 4834 fp->err_rx_alloc_errors++; 4835 4836 QLNX_INC_IQDROPS(ifp); 4837 4838 goto next_cqe; 4839 } 4840 4841 ecore_chain_consume(&rxq->rx_bd_ring); 4842 4843 len_on_first_bd = fp_cqe->len_on_first_bd; 4844 m_adj(mp, pad); 4845 mp->m_pkthdr.len = len; 4846 4847 if ((len > 60) && (len > len_on_first_bd)) { 4848 4849 mp->m_len = len_on_first_bd; 4850 4851 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4852 (len - len_on_first_bd)) != 0) { 4853 4854 m_freem(mp); 4855 4856 QLNX_INC_IQDROPS(ifp); 4857 4858 goto next_cqe; 4859 } 4860 4861 } else if (len_on_first_bd < len) { 4862 fp->err_rx_jumbo_chain_pkts++; 4863 } else { 4864 mp->m_len = len; 4865 } 4866 4867 mp->m_flags |= M_PKTHDR; 4868 4869 /* assign packet to this interface */ 4870 mp->m_pkthdr.rcvif = ifp; 4871 4872 /* assume no hardware checksum has completed */ 4873 mp->m_pkthdr.csum_flags = 0; 4874 4875 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4876 4877 #if __FreeBSD_version >= 1100000 4878 4879 hash_type = fp_cqe->bitfields & 4880 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4881 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4882 4883 switch (hash_type) { 4884 4885 case RSS_HASH_TYPE_IPV4: 4886 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4887 break; 4888 4889 case RSS_HASH_TYPE_TCP_IPV4: 4890 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4891 break; 4892 4893 case RSS_HASH_TYPE_IPV6: 4894 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4895 break; 4896 4897 case RSS_HASH_TYPE_TCP_IPV6: 4898 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4899 break; 4900 4901 default: 4902 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4903 break; 4904 } 4905 4906 #else 4907 mp->m_flags |= M_FLOWID; 4908 #endif 4909 4910 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4911 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4912 } 4913 4914 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4915 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4916 } 4917 4918 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4919 mp->m_pkthdr.csum_data = 0xFFFF; 4920 mp->m_pkthdr.csum_flags |= 4921 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4922 } 4923 4924 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4925 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4926 mp->m_flags |= M_VLANTAG; 4927 } 4928 4929 QLNX_INC_IPACKETS(ifp); 4930 QLNX_INC_IBYTES(ifp, len); 4931 4932 #ifdef QLNX_SOFT_LRO 4933 4934 if (lro_enable) { 4935 4936 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4937 4938 tcp_lro_queue_mbuf(lro, mp); 4939 4940 #else 4941 4942 if (tcp_lro_rx(lro, mp, 0)) 4943 (*ifp->if_input)(ifp, mp); 4944 4945 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4946 4947 } else { 4948 (*ifp->if_input)(ifp, mp); 4949 } 4950 #else 4951 4952 (*ifp->if_input)(ifp, mp); 4953 4954 #endif /* #ifdef QLNX_SOFT_LRO */ 4955 4956 rx_pkt++; 4957 4958 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4959 4960 next_cqe: /* don't consume bd 
rx buffer */ 4961 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4962 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4963 4964 /* CR TPA - revisit how to handle budget in TPA perhaps 4965 increase on "end" */ 4966 if (rx_pkt == budget) 4967 break; 4968 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4969 4970 /* Update producers */ 4971 qlnx_update_rx_prod(p_hwfn, rxq); 4972 4973 return rx_pkt; 4974 } 4975 4976 4977 /* 4978 * fast path interrupt 4979 */ 4980 4981 static void 4982 qlnx_fp_isr(void *arg) 4983 { 4984 qlnx_ivec_t *ivec = arg; 4985 qlnx_host_t *ha; 4986 struct qlnx_fastpath *fp = NULL; 4987 int idx; 4988 4989 ha = ivec->ha; 4990 4991 if (ha->state != QLNX_STATE_OPEN) { 4992 return; 4993 } 4994 4995 idx = ivec->rss_idx; 4996 4997 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4998 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4999 ha->err_illegal_intr++; 5000 return; 5001 } 5002 fp = &ha->fp_array[idx]; 5003 5004 if (fp == NULL) { 5005 ha->err_fp_null++; 5006 } else { 5007 int rx_int = 0, total_rx_count = 0; 5008 int lro_enable, tc; 5009 struct qlnx_tx_queue *txq; 5010 uint16_t elem_left; 5011 5012 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 5013 5014 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 5015 5016 do { 5017 for (tc = 0; tc < ha->num_tc; tc++) { 5018 5019 txq = fp->txq[tc]; 5020 5021 if((int)(elem_left = 5022 ecore_chain_get_elem_left(&txq->tx_pbl)) < 5023 QLNX_TX_ELEM_THRESH) { 5024 5025 if (mtx_trylock(&fp->tx_mtx)) { 5026 #ifdef QLNX_TRACE_PERF_DATA 5027 tx_compl = fp->tx_pkts_completed; 5028 #endif 5029 5030 qlnx_tx_int(ha, fp, fp->txq[tc]); 5031 #ifdef QLNX_TRACE_PERF_DATA 5032 fp->tx_pkts_compl_intr += 5033 (fp->tx_pkts_completed - tx_compl); 5034 if ((fp->tx_pkts_completed - tx_compl) <= 32) 5035 fp->tx_comInt[0]++; 5036 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 5037 ((fp->tx_pkts_completed - tx_compl) <= 64)) 5038 fp->tx_comInt[1]++; 5039 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 5040 ((fp->tx_pkts_completed - tx_compl) <= 128)) 5041 fp->tx_comInt[2]++; 5042 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 5043 fp->tx_comInt[3]++; 5044 #endif 5045 mtx_unlock(&fp->tx_mtx); 5046 } 5047 } 5048 } 5049 5050 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 5051 lro_enable); 5052 5053 if (rx_int) { 5054 fp->rx_pkts += rx_int; 5055 total_rx_count += rx_int; 5056 } 5057 5058 } while (rx_int); 5059 5060 #ifdef QLNX_SOFT_LRO 5061 { 5062 struct lro_ctrl *lro; 5063 5064 lro = &fp->rxq->lro; 5065 5066 if (lro_enable && total_rx_count) { 5067 5068 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5069 5070 #ifdef QLNX_TRACE_LRO_CNT 5071 if (lro->lro_mbuf_count & ~1023) 5072 fp->lro_cnt_1024++; 5073 else if (lro->lro_mbuf_count & ~511) 5074 fp->lro_cnt_512++; 5075 else if (lro->lro_mbuf_count & ~255) 5076 fp->lro_cnt_256++; 5077 else if (lro->lro_mbuf_count & ~127) 5078 fp->lro_cnt_128++; 5079 else if (lro->lro_mbuf_count & ~63) 5080 fp->lro_cnt_64++; 5081 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 5082 5083 tcp_lro_flush_all(lro); 5084 5085 #else 5086 struct lro_entry *queued; 5087 5088 while ((!SLIST_EMPTY(&lro->lro_active))) { 5089 queued = SLIST_FIRST(&lro->lro_active); 5090 SLIST_REMOVE_HEAD(&lro->lro_active, \ 5091 next); 5092 tcp_lro_flush(lro, queued); 5093 } 5094 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5095 } 5096 } 5097 #endif /* #ifdef QLNX_SOFT_LRO */ 5098 5099 ecore_sb_update_sb_idx(fp->sb_info); 5100 rmb(); 5101 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 5102 } 
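	/*
	 * The status block index was refreshed and the IGU was re-armed
	 * (IGU_INT_ENABLE) above, so completions that arrive after the final
	 * polling pass can raise a fresh fastpath interrupt.
	 */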
5103 5104 return; 5105 } 5106 5107 5108 /* 5109 * slow path interrupt processing function 5110 * can be invoked in polled mode or in interrupt mode via taskqueue. 5111 */ 5112 void 5113 qlnx_sp_isr(void *arg) 5114 { 5115 struct ecore_hwfn *p_hwfn; 5116 qlnx_host_t *ha; 5117 5118 p_hwfn = arg; 5119 5120 ha = (qlnx_host_t *)p_hwfn->p_dev; 5121 5122 ha->sp_interrupts++; 5123 5124 QL_DPRINT2(ha, "enter\n"); 5125 5126 ecore_int_sp_dpc(p_hwfn); 5127 5128 QL_DPRINT2(ha, "exit\n"); 5129 5130 return; 5131 } 5132 5133 /***************************************************************************** 5134 * Support Functions for DMA'able Memory 5135 *****************************************************************************/ 5136 5137 static void 5138 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 5139 { 5140 *((bus_addr_t *)arg) = 0; 5141 5142 if (error) { 5143 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 5144 return; 5145 } 5146 5147 *((bus_addr_t *)arg) = segs[0].ds_addr; 5148 5149 return; 5150 } 5151 5152 static int 5153 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5154 { 5155 int ret = 0; 5156 device_t dev; 5157 bus_addr_t b_addr; 5158 5159 dev = ha->pci_dev; 5160 5161 ret = bus_dma_tag_create( 5162 ha->parent_tag,/* parent */ 5163 dma_buf->alignment, 5164 ((bus_size_t)(1ULL << 32)),/* boundary */ 5165 BUS_SPACE_MAXADDR, /* lowaddr */ 5166 BUS_SPACE_MAXADDR, /* highaddr */ 5167 NULL, NULL, /* filter, filterarg */ 5168 dma_buf->size, /* maxsize */ 5169 1, /* nsegments */ 5170 dma_buf->size, /* maxsegsize */ 5171 0, /* flags */ 5172 NULL, NULL, /* lockfunc, lockarg */ 5173 &dma_buf->dma_tag); 5174 5175 if (ret) { 5176 QL_DPRINT1(ha, "could not create dma tag\n"); 5177 goto qlnx_alloc_dmabuf_exit; 5178 } 5179 ret = bus_dmamem_alloc(dma_buf->dma_tag, 5180 (void **)&dma_buf->dma_b, 5181 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 5182 &dma_buf->dma_map); 5183 if (ret) { 5184 bus_dma_tag_destroy(dma_buf->dma_tag); 5185 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 5186 goto qlnx_alloc_dmabuf_exit; 5187 } 5188 5189 ret = bus_dmamap_load(dma_buf->dma_tag, 5190 dma_buf->dma_map, 5191 dma_buf->dma_b, 5192 dma_buf->size, 5193 qlnx_dmamap_callback, 5194 &b_addr, BUS_DMA_NOWAIT); 5195 5196 if (ret || !b_addr) { 5197 bus_dma_tag_destroy(dma_buf->dma_tag); 5198 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 5199 dma_buf->dma_map); 5200 ret = -1; 5201 goto qlnx_alloc_dmabuf_exit; 5202 } 5203 5204 dma_buf->dma_addr = b_addr; 5205 5206 qlnx_alloc_dmabuf_exit: 5207 5208 return ret; 5209 } 5210 5211 static void 5212 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5213 { 5214 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5215 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5216 bus_dma_tag_destroy(dma_buf->dma_tag); 5217 return; 5218 } 5219 5220 void * 5221 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5222 { 5223 qlnx_dma_t dma_buf; 5224 qlnx_dma_t *dma_p; 5225 qlnx_host_t *ha; 5226 device_t dev; 5227 5228 ha = (qlnx_host_t *)ecore_dev; 5229 dev = ha->pci_dev; 5230 5231 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5232 5233 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5234 5235 dma_buf.size = size + PAGE_SIZE; 5236 dma_buf.alignment = 8; 5237 5238 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5239 return (NULL); 5240 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5241 5242 *phys = dma_buf.dma_addr; 5243 5244 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + 
size); 5245 5246 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5247 5248 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5249 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5250 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5251 5252 return (dma_buf.dma_b); 5253 } 5254 5255 void 5256 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5257 uint32_t size) 5258 { 5259 qlnx_dma_t dma_buf, *dma_p; 5260 qlnx_host_t *ha; 5261 device_t dev; 5262 5263 ha = (qlnx_host_t *)ecore_dev; 5264 dev = ha->pci_dev; 5265 5266 if (v_addr == NULL) 5267 return; 5268 5269 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5270 5271 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5272 5273 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5274 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5275 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5276 5277 dma_buf = *dma_p; 5278 5279 if (!ha->qlnxr_debug) 5280 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5281 return; 5282 } 5283 5284 static int 5285 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5286 { 5287 int ret; 5288 device_t dev; 5289 5290 dev = ha->pci_dev; 5291 5292 /* 5293 * Allocate parent DMA Tag 5294 */ 5295 ret = bus_dma_tag_create( 5296 bus_get_dma_tag(dev), /* parent */ 5297 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5298 BUS_SPACE_MAXADDR, /* lowaddr */ 5299 BUS_SPACE_MAXADDR, /* highaddr */ 5300 NULL, NULL, /* filter, filterarg */ 5301 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5302 0, /* nsegments */ 5303 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5304 0, /* flags */ 5305 NULL, NULL, /* lockfunc, lockarg */ 5306 &ha->parent_tag); 5307 5308 if (ret) { 5309 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5310 return (-1); 5311 } 5312 5313 ha->flags.parent_tag = 1; 5314 5315 return (0); 5316 } 5317 5318 static void 5319 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5320 { 5321 if (ha->parent_tag != NULL) { 5322 bus_dma_tag_destroy(ha->parent_tag); 5323 ha->parent_tag = NULL; 5324 } 5325 return; 5326 } 5327 5328 static int 5329 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5330 { 5331 if (bus_dma_tag_create(NULL, /* parent */ 5332 1, 0, /* alignment, bounds */ 5333 BUS_SPACE_MAXADDR, /* lowaddr */ 5334 BUS_SPACE_MAXADDR, /* highaddr */ 5335 NULL, NULL, /* filter, filterarg */ 5336 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5337 QLNX_MAX_SEGMENTS, /* nsegments */ 5338 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5339 0, /* flags */ 5340 NULL, /* lockfunc */ 5341 NULL, /* lockfuncarg */ 5342 &ha->tx_tag)) { 5343 5344 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5345 return (-1); 5346 } 5347 5348 return (0); 5349 } 5350 5351 static void 5352 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5353 { 5354 if (ha->tx_tag != NULL) { 5355 bus_dma_tag_destroy(ha->tx_tag); 5356 ha->tx_tag = NULL; 5357 } 5358 return; 5359 } 5360 5361 static int 5362 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5363 { 5364 if (bus_dma_tag_create(NULL, /* parent */ 5365 1, 0, /* alignment, bounds */ 5366 BUS_SPACE_MAXADDR, /* lowaddr */ 5367 BUS_SPACE_MAXADDR, /* highaddr */ 5368 NULL, NULL, /* filter, filterarg */ 5369 MJUM9BYTES, /* maxsize */ 5370 1, /* nsegments */ 5371 MJUM9BYTES, /* maxsegsize */ 5372 0, /* flags */ 5373 NULL, /* lockfunc */ 5374 NULL, /* lockfuncarg */ 5375 &ha->rx_tag)) { 5376 5377 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5378 5379 return (-1); 5380 } 5381 return (0); 5382 } 5383 5384 static void 5385 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5386 { 5387 if (ha->rx_tag != NULL) { 5388 bus_dma_tag_destroy(ha->rx_tag); 5389 ha->rx_tag = NULL; 5390 } 5391 return; 5392 } 5393 5394 
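/*
 * End of the DMA tag helpers. The exported functions below are the OSAL-style
 * glue that the ecore layer calls into: PCI BAR sizing, config-space reads and
 * writes, register and doorbell I/O, coherent memory allocation, and link and
 * recovery notifications.
 */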
/********************************* 5395 * Exported functions 5396 *********************************/ 5397 uint32_t 5398 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5399 { 5400 uint32_t bar_size; 5401 5402 bar_id = bar_id * 2; 5403 5404 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5405 SYS_RES_MEMORY, 5406 PCIR_BAR(bar_id)); 5407 5408 return (bar_size); 5409 } 5410 5411 uint32_t 5412 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5413 { 5414 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5415 pci_reg, 1); 5416 return 0; 5417 } 5418 5419 uint32_t 5420 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5421 uint16_t *reg_value) 5422 { 5423 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5424 pci_reg, 2); 5425 return 0; 5426 } 5427 5428 uint32_t 5429 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5430 uint32_t *reg_value) 5431 { 5432 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5433 pci_reg, 4); 5434 return 0; 5435 } 5436 5437 void 5438 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5439 { 5440 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5441 pci_reg, reg_value, 1); 5442 return; 5443 } 5444 5445 void 5446 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5447 uint16_t reg_value) 5448 { 5449 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5450 pci_reg, reg_value, 2); 5451 return; 5452 } 5453 5454 void 5455 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5456 uint32_t reg_value) 5457 { 5458 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5459 pci_reg, reg_value, 4); 5460 return; 5461 } 5462 5463 int 5464 qlnx_pci_find_capability(void *ecore_dev, int cap) 5465 { 5466 int reg; 5467 qlnx_host_t *ha; 5468 5469 ha = ecore_dev; 5470 5471 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0) 5472 return reg; 5473 else { 5474 QL_DPRINT1(ha, "failed\n"); 5475 return 0; 5476 } 5477 } 5478 5479 int 5480 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5481 { 5482 int reg; 5483 qlnx_host_t *ha; 5484 5485 ha = ecore_dev; 5486 5487 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0) 5488 return reg; 5489 else { 5490 QL_DPRINT1(ha, "failed\n"); 5491 return 0; 5492 } 5493 } 5494 5495 uint32_t 5496 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5497 { 5498 uint32_t data32; 5499 struct ecore_hwfn *p_hwfn; 5500 5501 p_hwfn = hwfn; 5502 5503 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5504 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5505 5506 return (data32); 5507 } 5508 5509 void 5510 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5511 { 5512 struct ecore_hwfn *p_hwfn = hwfn; 5513 5514 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5515 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5516 5517 return; 5518 } 5519 5520 void 5521 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5522 { 5523 struct ecore_hwfn *p_hwfn = hwfn; 5524 5525 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5526 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5527 return; 5528 } 5529 5530 void 5531 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5532 { 5533 struct ecore_dev *cdev; 5534 struct ecore_hwfn *p_hwfn; 5535 uint32_t offset; 5536 5537 p_hwfn = hwfn; 5538 5539 cdev = p_hwfn->p_dev; 5540 5541 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5542 bus_write_4(((qlnx_host_t 
*)cdev)->pci_dbells, offset, value); 5543 5544 return; 5545 } 5546 5547 void 5548 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5549 { 5550 struct ecore_hwfn *p_hwfn = hwfn; 5551 5552 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5553 (bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5554 5555 return; 5556 } 5557 5558 uint32_t 5559 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5560 { 5561 uint32_t data32; 5562 bus_size_t offset; 5563 struct ecore_dev *cdev; 5564 5565 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5566 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5567 5568 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5569 5570 return (data32); 5571 } 5572 5573 void 5574 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5575 { 5576 bus_size_t offset; 5577 struct ecore_dev *cdev; 5578 5579 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5580 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5581 5582 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5583 5584 return; 5585 } 5586 5587 void 5588 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5589 { 5590 bus_size_t offset; 5591 struct ecore_dev *cdev; 5592 5593 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5594 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5595 5596 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5597 return; 5598 } 5599 5600 void * 5601 qlnx_zalloc(uint32_t size) 5602 { 5603 caddr_t va; 5604 5605 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5606 bzero(va, size); 5607 return ((void *)va); 5608 } 5609 5610 void 5611 qlnx_barrier(void *p_hwfn) 5612 { 5613 qlnx_host_t *ha; 5614 5615 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5616 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5617 } 5618 5619 void 5620 qlnx_link_update(void *p_hwfn) 5621 { 5622 qlnx_host_t *ha; 5623 int prev_link_state; 5624 5625 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5626 5627 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5628 5629 prev_link_state = ha->link_up; 5630 ha->link_up = ha->if_link.link_up; 5631 5632 if (prev_link_state != ha->link_up) { 5633 if (ha->link_up) { 5634 if_link_state_change(ha->ifp, LINK_STATE_UP); 5635 } else { 5636 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5637 } 5638 } 5639 #ifndef QLNX_VF 5640 #ifdef CONFIG_ECORE_SRIOV 5641 5642 if (qlnx_vf_device(ha) != 0) { 5643 if (ha->sriov_initialized) 5644 qlnx_inform_vf_link_state(p_hwfn, ha); 5645 } 5646 5647 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5648 #endif /* #ifdef QLNX_VF */ 5649 5650 return; 5651 } 5652 5653 static void 5654 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5655 struct ecore_vf_acquire_sw_info *p_sw_info) 5656 { 5657 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5658 (QLNX_VERSION_MINOR << 16) | 5659 QLNX_VERSION_BUILD; 5660 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5661 5662 return; 5663 } 5664 5665 void 5666 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5667 void *p_sw_info) 5668 { 5669 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5670 5671 return; 5672 } 5673 5674 void 5675 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5676 struct qlnx_link_output *if_link) 5677 { 5678 struct ecore_mcp_link_params link_params; 5679 struct ecore_mcp_link_state link_state; 5680 uint8_t p_change; 5681 struct ecore_ptt *p_ptt = NULL; 5682 5683 5684 memset(if_link, 0, 
sizeof(*if_link)); 5685 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5686 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5687 5688 ha = (qlnx_host_t *)hwfn->p_dev; 5689 5690 /* Prepare source inputs */ 5691 /* we only deal with physical functions */ 5692 if (qlnx_vf_device(ha) != 0) { 5693 5694 p_ptt = ecore_ptt_acquire(hwfn); 5695 5696 if (p_ptt == NULL) { 5697 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5698 return; 5699 } 5700 5701 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5702 ecore_ptt_release(hwfn, p_ptt); 5703 5704 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5705 sizeof(link_params)); 5706 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5707 sizeof(link_state)); 5708 } else { 5709 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5710 ecore_vf_read_bulletin(hwfn, &p_change); 5711 ecore_vf_get_link_params(hwfn, &link_params); 5712 ecore_vf_get_link_state(hwfn, &link_state); 5713 } 5714 5715 /* Set the link parameters to pass to protocol driver */ 5716 if (link_state.link_up) { 5717 if_link->link_up = true; 5718 if_link->speed = link_state.speed; 5719 } 5720 5721 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5722 5723 if (link_params.speed.autoneg) 5724 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5725 5726 if (link_params.pause.autoneg || 5727 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5728 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5729 5730 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5731 link_params.pause.forced_tx) 5732 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5733 5734 if (link_params.speed.advertised_speeds & 5735 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5736 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5737 QLNX_LINK_CAP_1000baseT_Full; 5738 5739 if (link_params.speed.advertised_speeds & 5740 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5741 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5742 5743 if (link_params.speed.advertised_speeds & 5744 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5745 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5746 5747 if (link_params.speed.advertised_speeds & 5748 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5749 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5750 5751 if (link_params.speed.advertised_speeds & 5752 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5753 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5754 5755 if (link_params.speed.advertised_speeds & 5756 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5757 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5758 5759 if_link->advertised_caps = if_link->supported_caps; 5760 5761 if_link->autoneg = link_params.speed.autoneg; 5762 if_link->duplex = QLNX_LINK_DUPLEX; 5763 5764 /* Link partner capabilities */ 5765 5766 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5767 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5768 5769 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5770 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5771 5772 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5773 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5774 5775 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5776 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5777 5778 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5779 if_link->link_partner_caps |= 
QLNX_LINK_CAP_40000baseLR4_Full; 5780 5781 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5782 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5783 5784 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5785 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5786 5787 if (link_state.an_complete) 5788 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5789 5790 if (link_state.partner_adv_pause) 5791 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5792 5793 if ((link_state.partner_adv_pause == 5794 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5795 (link_state.partner_adv_pause == 5796 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5797 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5798 5799 return; 5800 } 5801 5802 void 5803 qlnx_schedule_recovery(void *p_hwfn) 5804 { 5805 qlnx_host_t *ha; 5806 5807 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5808 5809 if (qlnx_vf_device(ha) != 0) { 5810 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5811 } 5812 5813 return; 5814 } 5815 5816 static int 5817 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5818 { 5819 int rc, i; 5820 5821 for (i = 0; i < cdev->num_hwfns; i++) { 5822 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5823 p_hwfn->pf_params = *func_params; 5824 5825 #ifdef QLNX_ENABLE_IWARP 5826 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5827 p_hwfn->using_ll2 = true; 5828 } 5829 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5830 5831 } 5832 5833 rc = ecore_resc_alloc(cdev); 5834 if (rc) 5835 goto qlnx_nic_setup_exit; 5836 5837 ecore_resc_setup(cdev); 5838 5839 qlnx_nic_setup_exit: 5840 5841 return rc; 5842 } 5843 5844 static int 5845 qlnx_nic_start(struct ecore_dev *cdev) 5846 { 5847 int rc; 5848 struct ecore_hw_init_params params; 5849 5850 bzero(&params, sizeof (struct ecore_hw_init_params)); 5851 5852 params.p_tunn = NULL; 5853 params.b_hw_start = true; 5854 params.int_mode = cdev->int_mode; 5855 params.allow_npar_tx_switch = true; 5856 params.bin_fw_data = NULL; 5857 5858 rc = ecore_hw_init(cdev, &params); 5859 if (rc) { 5860 ecore_resc_free(cdev); 5861 return rc; 5862 } 5863 5864 return 0; 5865 } 5866 5867 static int 5868 qlnx_slowpath_start(qlnx_host_t *ha) 5869 { 5870 struct ecore_dev *cdev; 5871 struct ecore_pf_params pf_params; 5872 int rc; 5873 5874 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5875 pf_params.eth_pf_params.num_cons = 5876 (ha->num_rss) * (ha->num_tc + 1); 5877 5878 #ifdef QLNX_ENABLE_IWARP 5879 if (qlnx_vf_device(ha) != 0) { 5880 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5881 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5882 pf_params.rdma_pf_params.num_qps = 1024; 5883 pf_params.rdma_pf_params.num_srqs = 1024; 5884 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5885 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5886 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5887 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5888 pf_params.rdma_pf_params.num_qps = 8192; 5889 pf_params.rdma_pf_params.num_srqs = 8192; 5890 //pf_params.rdma_pf_params.min_dpis = 0; 5891 pf_params.rdma_pf_params.min_dpis = 8; 5892 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5893 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5894 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5895 } 5896 } 5897 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5898 5899 cdev = &ha->cdev; 5900 5901 rc = qlnx_nic_setup(cdev, &pf_params); 5902 if (rc) 5903
goto qlnx_slowpath_start_exit; 5904 5905 cdev->int_mode = ECORE_INT_MODE_MSIX; 5906 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5907 5908 #ifdef QLNX_MAX_COALESCE 5909 cdev->rx_coalesce_usecs = 255; 5910 cdev->tx_coalesce_usecs = 255; 5911 #endif 5912 5913 rc = qlnx_nic_start(cdev); 5914 5915 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5916 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5917 5918 #ifdef QLNX_USER_LLDP 5919 (void)qlnx_set_lldp_tlvx(ha, NULL); 5920 #endif /* #ifdef QLNX_USER_LLDP */ 5921 5922 qlnx_slowpath_start_exit: 5923 5924 return (rc); 5925 } 5926 5927 static int 5928 qlnx_slowpath_stop(qlnx_host_t *ha) 5929 { 5930 struct ecore_dev *cdev; 5931 device_t dev = ha->pci_dev; 5932 int i; 5933 5934 cdev = &ha->cdev; 5935 5936 ecore_hw_stop(cdev); 5937 5938 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5939 5940 if (ha->sp_handle[i]) 5941 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5942 ha->sp_handle[i]); 5943 5944 ha->sp_handle[i] = NULL; 5945 5946 if (ha->sp_irq[i]) 5947 (void) bus_release_resource(dev, SYS_RES_IRQ, 5948 ha->sp_irq_rid[i], ha->sp_irq[i]); 5949 ha->sp_irq[i] = NULL; 5950 } 5951 5952 ecore_resc_free(cdev); 5953 5954 return 0; 5955 } 5956 5957 static void 5958 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5959 char ver_str[VER_SIZE]) 5960 { 5961 int i; 5962 5963 memcpy(cdev->name, name, NAME_SIZE); 5964 5965 for_each_hwfn(cdev, i) { 5966 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5967 } 5968 5969 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5970 5971 return ; 5972 } 5973 5974 void 5975 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5976 { 5977 enum ecore_mcp_protocol_type type; 5978 union ecore_mcp_protocol_stats *stats; 5979 struct ecore_eth_stats eth_stats; 5980 qlnx_host_t *ha; 5981 5982 ha = cdev; 5983 stats = proto_stats; 5984 type = proto_type; 5985 5986 switch (type) { 5987 5988 case ECORE_MCP_LAN_STATS: 5989 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); 5990 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5991 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5992 stats->lan_stats.fcs_err = -1; 5993 break; 5994 5995 default: 5996 ha->err_get_proto_invalid_type++; 5997 5998 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5999 break; 6000 } 6001 return; 6002 } 6003 6004 static int 6005 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 6006 { 6007 struct ecore_hwfn *p_hwfn; 6008 struct ecore_ptt *p_ptt; 6009 6010 p_hwfn = &ha->cdev.hwfns[0]; 6011 p_ptt = ecore_ptt_acquire(p_hwfn); 6012 6013 if (p_ptt == NULL) { 6014 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 6015 return (-1); 6016 } 6017 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 6018 6019 ecore_ptt_release(p_hwfn, p_ptt); 6020 6021 return (0); 6022 } 6023 6024 static int 6025 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 6026 { 6027 struct ecore_hwfn *p_hwfn; 6028 struct ecore_ptt *p_ptt; 6029 6030 p_hwfn = &ha->cdev.hwfns[0]; 6031 p_ptt = ecore_ptt_acquire(p_hwfn); 6032 6033 if (p_ptt == NULL) { 6034 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 6035 return (-1); 6036 } 6037 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 6038 6039 ecore_ptt_release(p_hwfn, p_ptt); 6040 6041 return (0); 6042 } 6043 6044 static int 6045 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 6046 { 6047 struct ecore_dev *cdev; 6048 6049 cdev = &ha->cdev; 6050 6051 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 6052 bzero(&ha->rxq_array[0], (sizeof(struct 
qlnx_rx_queue) * QLNX_MAX_RSS)); 6053 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 6054 6055 return 0; 6056 } 6057 6058 static void 6059 qlnx_init_fp(qlnx_host_t *ha) 6060 { 6061 int rss_id, txq_array_index, tc; 6062 6063 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6064 6065 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6066 6067 fp->rss_id = rss_id; 6068 fp->edev = ha; 6069 fp->sb_info = &ha->sb_array[rss_id]; 6070 fp->rxq = &ha->rxq_array[rss_id]; 6071 fp->rxq->rxq_id = rss_id; 6072 6073 for (tc = 0; tc < ha->num_tc; tc++) { 6074 txq_array_index = tc * ha->num_rss + rss_id; 6075 fp->txq[tc] = &ha->txq_array[txq_array_index]; 6076 fp->txq[tc]->index = txq_array_index; 6077 } 6078 6079 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 6080 rss_id); 6081 6082 fp->tx_ring_full = 0; 6083 6084 /* reset all the statistics counters */ 6085 6086 fp->tx_pkts_processed = 0; 6087 fp->tx_pkts_freed = 0; 6088 fp->tx_pkts_transmitted = 0; 6089 fp->tx_pkts_completed = 0; 6090 6091 #ifdef QLNX_TRACE_PERF_DATA 6092 fp->tx_pkts_trans_ctx = 0; 6093 fp->tx_pkts_compl_ctx = 0; 6094 fp->tx_pkts_trans_fp = 0; 6095 fp->tx_pkts_compl_fp = 0; 6096 fp->tx_pkts_compl_intr = 0; 6097 #endif 6098 fp->tx_lso_wnd_min_len = 0; 6099 fp->tx_defrag = 0; 6100 fp->tx_nsegs_gt_elem_left = 0; 6101 fp->tx_tso_max_nsegs = 0; 6102 fp->tx_tso_min_nsegs = 0; 6103 fp->err_tx_nsegs_gt_elem_left = 0; 6104 fp->err_tx_dmamap_create = 0; 6105 fp->err_tx_defrag_dmamap_load = 0; 6106 fp->err_tx_non_tso_max_seg = 0; 6107 fp->err_tx_dmamap_load = 0; 6108 fp->err_tx_defrag = 0; 6109 fp->err_tx_free_pkt_null = 0; 6110 fp->err_tx_cons_idx_conflict = 0; 6111 6112 fp->rx_pkts = 0; 6113 fp->err_m_getcl = 0; 6114 fp->err_m_getjcl = 0; 6115 } 6116 return; 6117 } 6118 6119 void 6120 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 6121 { 6122 struct ecore_dev *cdev; 6123 6124 cdev = &ha->cdev; 6125 6126 if (sb_info->sb_virt) { 6127 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 6128 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 6129 sb_info->sb_virt = NULL; 6130 } 6131 } 6132 6133 static int 6134 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 6135 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 6136 { 6137 struct ecore_hwfn *p_hwfn; 6138 int hwfn_index, rc; 6139 u16 rel_sb_id; 6140 6141 hwfn_index = sb_id % cdev->num_hwfns; 6142 p_hwfn = &cdev->hwfns[hwfn_index]; 6143 rel_sb_id = sb_id / cdev->num_hwfns; 6144 6145 QL_DPRINT2(((qlnx_host_t *)cdev), 6146 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 6147 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 6148 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 6149 sb_virt_addr, (void *)sb_phy_addr); 6150 6151 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 6152 sb_virt_addr, sb_phy_addr, rel_sb_id); 6153 6154 return rc; 6155 } 6156 6157 /* This function allocates fast-path status block memory */ 6158 int 6159 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 6160 { 6161 struct status_block_e4 *sb_virt; 6162 bus_addr_t sb_phys; 6163 int rc; 6164 uint32_t size; 6165 struct ecore_dev *cdev; 6166 6167 cdev = &ha->cdev; 6168 6169 size = sizeof(*sb_virt); 6170 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 6171 6172 if (!sb_virt) { 6173 QL_DPRINT1(ha, "Status block allocation failed\n"); 6174 return -ENOMEM; 6175 } 6176 6177 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 6178 if (rc) { 6179 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, 
sb_phys, size); 6180 } 6181 6182 return rc; 6183 } 6184 6185 static void 6186 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6187 { 6188 int i; 6189 struct sw_rx_data *rx_buf; 6190 6191 for (i = 0; i < rxq->num_rx_buffers; i++) { 6192 6193 rx_buf = &rxq->sw_rx_ring[i]; 6194 6195 if (rx_buf->data != NULL) { 6196 if (rx_buf->map != NULL) { 6197 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6198 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6199 rx_buf->map = NULL; 6200 } 6201 m_freem(rx_buf->data); 6202 rx_buf->data = NULL; 6203 } 6204 } 6205 return; 6206 } 6207 6208 static void 6209 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6210 { 6211 struct ecore_dev *cdev; 6212 int i; 6213 6214 cdev = &ha->cdev; 6215 6216 qlnx_free_rx_buffers(ha, rxq); 6217 6218 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6219 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 6220 if (rxq->tpa_info[i].mpf != NULL) 6221 m_freem(rxq->tpa_info[i].mpf); 6222 } 6223 6224 bzero((void *)&rxq->sw_rx_ring[0], 6225 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6226 6227 /* Free the real RQ ring used by FW */ 6228 if (rxq->rx_bd_ring.p_virt_addr) { 6229 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6230 rxq->rx_bd_ring.p_virt_addr = NULL; 6231 } 6232 6233 /* Free the real completion ring used by FW */ 6234 if (rxq->rx_comp_ring.p_virt_addr && 6235 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6236 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6237 rxq->rx_comp_ring.p_virt_addr = NULL; 6238 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6239 } 6240 6241 #ifdef QLNX_SOFT_LRO 6242 { 6243 struct lro_ctrl *lro; 6244 6245 lro = &rxq->lro; 6246 tcp_lro_free(lro); 6247 } 6248 #endif /* #ifdef QLNX_SOFT_LRO */ 6249 6250 return; 6251 } 6252 6253 static int 6254 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6255 { 6256 register struct mbuf *mp; 6257 uint16_t rx_buf_size; 6258 struct sw_rx_data *sw_rx_data; 6259 struct eth_rx_bd *rx_bd; 6260 dma_addr_t dma_addr; 6261 bus_dmamap_t map; 6262 bus_dma_segment_t segs[1]; 6263 int nsegs; 6264 int ret; 6265 struct ecore_dev *cdev; 6266 6267 cdev = &ha->cdev; 6268 6269 rx_buf_size = rxq->rx_buf_size; 6270 6271 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6272 6273 if (mp == NULL) { 6274 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6275 return -ENOMEM; 6276 } 6277 6278 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6279 6280 map = (bus_dmamap_t)0; 6281 6282 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6283 BUS_DMA_NOWAIT); 6284 dma_addr = segs[0].ds_addr; 6285 6286 if (ret || !dma_addr || (nsegs != 1)) { 6287 m_freem(mp); 6288 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6289 ret, (long long unsigned int)dma_addr, nsegs); 6290 return -ENOMEM; 6291 } 6292 6293 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6294 sw_rx_data->data = mp; 6295 sw_rx_data->dma_addr = dma_addr; 6296 sw_rx_data->map = map; 6297 6298 /* Advance PROD and get BD pointer */ 6299 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6300 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6301 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6302 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6303 6304 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6305 6306 return 0; 6307 } 6308 6309 static int 6310 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6311 struct qlnx_agg_info *tpa) 6312 { 6313 struct mbuf *mp; 6314 dma_addr_t dma_addr; 6315 bus_dmamap_t map; 6316 bus_dma_segment_t segs[1]; 6317 int nsegs; 6318 int 
ret; 6319 struct sw_rx_data *rx_buf; 6320 6321 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6322 6323 if (mp == NULL) { 6324 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6325 return -ENOMEM; 6326 } 6327 6328 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6329 6330 map = (bus_dmamap_t)0; 6331 6332 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6333 BUS_DMA_NOWAIT); 6334 dma_addr = segs[0].ds_addr; 6335 6336 if (ret || !dma_addr || (nsegs != 1)) { 6337 m_freem(mp); 6338 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6339 ret, (long long unsigned int)dma_addr, nsegs); 6340 return -ENOMEM; 6341 } 6342 6343 rx_buf = &tpa->rx_buf; 6344 6345 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6346 6347 rx_buf->data = mp; 6348 rx_buf->dma_addr = dma_addr; 6349 rx_buf->map = map; 6350 6351 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6352 6353 return (0); 6354 } 6355 6356 static void 6357 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6358 { 6359 struct sw_rx_data *rx_buf; 6360 6361 rx_buf = &tpa->rx_buf; 6362 6363 if (rx_buf->data != NULL) { 6364 if (rx_buf->map != NULL) { 6365 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6366 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6367 rx_buf->map = NULL; 6368 } 6369 m_freem(rx_buf->data); 6370 rx_buf->data = NULL; 6371 } 6372 return; 6373 } 6374 6375 /* This function allocates all memory needed per Rx queue */ 6376 static int 6377 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6378 { 6379 int i, rc, num_allocated; 6380 struct ifnet *ifp; 6381 struct ecore_dev *cdev; 6382 6383 cdev = &ha->cdev; 6384 ifp = ha->ifp; 6385 6386 rxq->num_rx_buffers = RX_RING_SIZE; 6387 6388 rxq->rx_buf_size = ha->rx_buf_size; 6389 6390 /* Allocate the parallel driver ring for Rx buffers */ 6391 bzero((void *)&rxq->sw_rx_ring[0], 6392 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6393 6394 /* Allocate FW Rx ring */ 6395 6396 rc = ecore_chain_alloc(cdev, 6397 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6398 ECORE_CHAIN_MODE_NEXT_PTR, 6399 ECORE_CHAIN_CNT_TYPE_U16, 6400 RX_RING_SIZE, 6401 sizeof(struct eth_rx_bd), 6402 &rxq->rx_bd_ring, NULL); 6403 6404 if (rc) 6405 goto err; 6406 6407 /* Allocate FW completion ring */ 6408 rc = ecore_chain_alloc(cdev, 6409 ECORE_CHAIN_USE_TO_CONSUME, 6410 ECORE_CHAIN_MODE_PBL, 6411 ECORE_CHAIN_CNT_TYPE_U16, 6412 RX_RING_SIZE, 6413 sizeof(union eth_rx_cqe), 6414 &rxq->rx_comp_ring, NULL); 6415 6416 if (rc) 6417 goto err; 6418 6419 /* Allocate buffers for the Rx ring */ 6420 6421 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6422 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6423 &rxq->tpa_info[i]); 6424 if (rc) 6425 break; 6426 6427 } 6428 6429 for (i = 0; i < rxq->num_rx_buffers; i++) { 6430 rc = qlnx_alloc_rx_buffer(ha, rxq); 6431 if (rc) 6432 break; 6433 } 6434 num_allocated = i; 6435 if (!num_allocated) { 6436 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6437 goto err; 6438 } else if (num_allocated < rxq->num_rx_buffers) { 6439 QL_DPRINT1(ha, "Allocated less buffers than" 6440 " desired (%d allocated)\n", num_allocated); 6441 } 6442 6443 #ifdef QLNX_SOFT_LRO 6444 6445 { 6446 struct lro_ctrl *lro; 6447 6448 lro = &rxq->lro; 6449 6450 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 6451 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6452 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6453 rxq->rxq_id); 6454 goto err; 6455 } 6456 #else 6457 if (tcp_lro_init(lro)) { 6458 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6459 rxq->rxq_id); 6460 goto err; 
6461 } 6462 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 6463 6464 lro->ifp = ha->ifp; 6465 } 6466 #endif /* #ifdef QLNX_SOFT_LRO */ 6467 return 0; 6468 6469 err: 6470 qlnx_free_mem_rxq(ha, rxq); 6471 return -ENOMEM; 6472 } 6473 6474 6475 static void 6476 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6477 struct qlnx_tx_queue *txq) 6478 { 6479 struct ecore_dev *cdev; 6480 6481 cdev = &ha->cdev; 6482 6483 bzero((void *)&txq->sw_tx_ring[0], 6484 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6485 6486 /* Free the real RQ ring used by FW */ 6487 if (txq->tx_pbl.p_virt_addr) { 6488 ecore_chain_free(cdev, &txq->tx_pbl); 6489 txq->tx_pbl.p_virt_addr = NULL; 6490 } 6491 return; 6492 } 6493 6494 /* This function allocates all memory needed per Tx queue */ 6495 static int 6496 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6497 struct qlnx_tx_queue *txq) 6498 { 6499 int ret = ECORE_SUCCESS; 6500 union eth_tx_bd_types *p_virt; 6501 struct ecore_dev *cdev; 6502 6503 cdev = &ha->cdev; 6504 6505 bzero((void *)&txq->sw_tx_ring[0], 6506 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6507 6508 /* Allocate the real Tx ring to be used by FW */ 6509 ret = ecore_chain_alloc(cdev, 6510 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6511 ECORE_CHAIN_MODE_PBL, 6512 ECORE_CHAIN_CNT_TYPE_U16, 6513 TX_RING_SIZE, 6514 sizeof(*p_virt), 6515 &txq->tx_pbl, NULL); 6516 6517 if (ret != ECORE_SUCCESS) { 6518 goto err; 6519 } 6520 6521 txq->num_tx_buffers = TX_RING_SIZE; 6522 6523 return 0; 6524 6525 err: 6526 qlnx_free_mem_txq(ha, fp, txq); 6527 return -ENOMEM; 6528 } 6529 6530 static void 6531 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6532 { 6533 struct mbuf *mp; 6534 struct ifnet *ifp = ha->ifp; 6535 6536 if (mtx_initialized(&fp->tx_mtx)) { 6537 6538 if (fp->tx_br != NULL) { 6539 6540 mtx_lock(&fp->tx_mtx); 6541 6542 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6543 fp->tx_pkts_freed++; 6544 m_freem(mp); 6545 } 6546 6547 mtx_unlock(&fp->tx_mtx); 6548 6549 buf_ring_free(fp->tx_br, M_DEVBUF); 6550 fp->tx_br = NULL; 6551 } 6552 mtx_destroy(&fp->tx_mtx); 6553 } 6554 return; 6555 } 6556 6557 static void 6558 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6559 { 6560 int tc; 6561 6562 qlnx_free_mem_sb(ha, fp->sb_info); 6563 6564 qlnx_free_mem_rxq(ha, fp->rxq); 6565 6566 for (tc = 0; tc < ha->num_tc; tc++) 6567 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6568 6569 return; 6570 } 6571 6572 static int 6573 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6574 { 6575 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6576 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6577 6578 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6579 6580 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6581 M_NOWAIT, &fp->tx_mtx); 6582 if (fp->tx_br == NULL) { 6583 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6584 ha->dev_unit, fp->rss_id); 6585 return -ENOMEM; 6586 } 6587 return 0; 6588 } 6589 6590 static int 6591 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6592 { 6593 int rc, tc; 6594 6595 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6596 if (rc) 6597 goto err; 6598 6599 if (ha->rx_jumbo_buf_eq_mtu) { 6600 if (ha->max_frame_size <= MCLBYTES) 6601 ha->rx_buf_size = MCLBYTES; 6602 else if (ha->max_frame_size <= MJUMPAGESIZE) 6603 ha->rx_buf_size = MJUMPAGESIZE; 6604 else if (ha->max_frame_size <= MJUM9BYTES) 6605 ha->rx_buf_size = MJUM9BYTES; 6606 else if (ha->max_frame_size <= MJUM16BYTES) 6607 
ha->rx_buf_size = MJUM16BYTES; 6608 } else { 6609 if (ha->max_frame_size <= MCLBYTES) 6610 ha->rx_buf_size = MCLBYTES; 6611 else 6612 ha->rx_buf_size = MJUMPAGESIZE; 6613 } 6614 6615 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6616 if (rc) 6617 goto err; 6618 6619 for (tc = 0; tc < ha->num_tc; tc++) { 6620 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6621 if (rc) 6622 goto err; 6623 } 6624 6625 return 0; 6626 6627 err: 6628 qlnx_free_mem_fp(ha, fp); 6629 return -ENOMEM; 6630 } 6631 6632 static void 6633 qlnx_free_mem_load(qlnx_host_t *ha) 6634 { 6635 int i; 6636 struct ecore_dev *cdev; 6637 6638 cdev = &ha->cdev; 6639 6640 for (i = 0; i < ha->num_rss; i++) { 6641 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6642 6643 qlnx_free_mem_fp(ha, fp); 6644 } 6645 return; 6646 } 6647 6648 static int 6649 qlnx_alloc_mem_load(qlnx_host_t *ha) 6650 { 6651 int rc = 0, rss_id; 6652 6653 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6654 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6655 6656 rc = qlnx_alloc_mem_fp(ha, fp); 6657 if (rc) 6658 break; 6659 } 6660 return (rc); 6661 } 6662 6663 static int 6664 qlnx_start_vport(struct ecore_dev *cdev, 6665 u8 vport_id, 6666 u16 mtu, 6667 u8 drop_ttl0_flg, 6668 u8 inner_vlan_removal_en_flg, 6669 u8 tx_switching, 6670 u8 hw_lro_enable) 6671 { 6672 int rc, i; 6673 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6674 qlnx_host_t *ha; 6675 6676 ha = (qlnx_host_t *)cdev; 6677 6678 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6679 vport_start_params.tx_switching = 0; 6680 vport_start_params.handle_ptp_pkts = 0; 6681 vport_start_params.only_untagged = 0; 6682 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6683 6684 vport_start_params.tpa_mode = 6685 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6686 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6687 6688 vport_start_params.vport_id = vport_id; 6689 vport_start_params.mtu = mtu; 6690 6691 6692 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6693 6694 for_each_hwfn(cdev, i) { 6695 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6696 6697 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6698 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6699 6700 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6701 6702 if (rc) { 6703 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6704 " with MTU %d\n" , vport_id, mtu); 6705 return -ENOMEM; 6706 } 6707 6708 ecore_hw_start_fastpath(p_hwfn); 6709 6710 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6711 vport_id, mtu); 6712 } 6713 return 0; 6714 } 6715 6716 6717 static int 6718 qlnx_update_vport(struct ecore_dev *cdev, 6719 struct qlnx_update_vport_params *params) 6720 { 6721 struct ecore_sp_vport_update_params sp_params; 6722 int rc, i, j, fp_index; 6723 struct ecore_hwfn *p_hwfn; 6724 struct ecore_rss_params *rss; 6725 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6726 struct qlnx_fastpath *fp; 6727 6728 memset(&sp_params, 0, sizeof(sp_params)); 6729 /* Translate protocol params into sp params */ 6730 sp_params.vport_id = params->vport_id; 6731 6732 sp_params.update_vport_active_rx_flg = 6733 params->update_vport_active_rx_flg; 6734 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6735 6736 sp_params.update_vport_active_tx_flg = 6737 params->update_vport_active_tx_flg; 6738 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6739 6740 sp_params.update_inner_vlan_removal_flg = 6741 params->update_inner_vlan_removal_flg; 6742 
sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6743 6744 sp_params.sge_tpa_params = params->sge_tpa_params; 6745 6746 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6747 * We need to re-fix the rss values per engine for CMT. 6748 */ 6749 if (params->rss_params->update_rss_config) 6750 sp_params.rss_params = params->rss_params; 6751 else 6752 sp_params.rss_params = NULL; 6753 6754 for_each_hwfn(cdev, i) { 6755 6756 p_hwfn = &cdev->hwfns[i]; 6757 6758 if ((cdev->num_hwfns > 1) && 6759 params->rss_params->update_rss_config && 6760 params->rss_params->rss_enable) { 6761 6762 rss = params->rss_params; 6763 6764 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6765 6766 fp_index = ((cdev->num_hwfns * j) + i) % 6767 ha->num_rss; 6768 6769 fp = &ha->fp_array[fp_index]; 6770 rss->rss_ind_table[j] = fp->rxq->handle; 6771 } 6772 6773 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6774 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6775 rss->rss_ind_table[j], 6776 rss->rss_ind_table[j+1], 6777 rss->rss_ind_table[j+2], 6778 rss->rss_ind_table[j+3], 6779 rss->rss_ind_table[j+4], 6780 rss->rss_ind_table[j+5], 6781 rss->rss_ind_table[j+6], 6782 rss->rss_ind_table[j+7]); 6783 j += 8; 6784 } 6785 } 6786 6787 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6788 6789 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6790 6791 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6792 ECORE_SPQ_MODE_EBLOCK, NULL); 6793 if (rc) { 6794 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6795 return rc; 6796 } 6797 6798 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6799 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6800 params->vport_id, params->vport_active_tx_flg, 6801 params->vport_active_rx_flg, 6802 params->update_vport_active_tx_flg, 6803 params->update_vport_active_rx_flg); 6804 } 6805 6806 return 0; 6807 } 6808 6809 static void 6810 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6811 { 6812 struct eth_rx_bd *rx_bd_cons = 6813 ecore_chain_consume(&rxq->rx_bd_ring); 6814 struct eth_rx_bd *rx_bd_prod = 6815 ecore_chain_produce(&rxq->rx_bd_ring); 6816 struct sw_rx_data *sw_rx_data_cons = 6817 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6818 struct sw_rx_data *sw_rx_data_prod = 6819 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6820 6821 sw_rx_data_prod->data = sw_rx_data_cons->data; 6822 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6823 6824 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6825 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6826 6827 return; 6828 } 6829 6830 static void 6831 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6832 { 6833 6834 uint16_t bd_prod; 6835 uint16_t cqe_prod; 6836 union { 6837 struct eth_rx_prod_data rx_prod_data; 6838 uint32_t data32; 6839 } rx_prods; 6840 6841 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6842 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6843 6844 /* Update producers */ 6845 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6846 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6847 6848 /* Make sure that the BD and SGE data is updated before updating the 6849 * producers since FW might read the BD/SGE right after the producer 6850 * is updated. 6851 */ 6852 wmb(); 6853 6854 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6855 sizeof(rx_prods), &rx_prods.data32); 6856 6857 /* mmiowb is needed to synchronize doorbell writes from more than one 6858 * processor. 
It guarantees that the write arrives to the device before 6859 * the napi lock is released and another qlnx_poll is called (possibly 6860 * on another CPU). Without this barrier, the next doorbell can bypass 6861 * this doorbell. This is applicable to IA64/Altix systems. 6862 */ 6863 wmb(); 6864 6865 return; 6866 } 6867 6868 static uint32_t qlnx_hash_key[] = { 6869 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6870 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6871 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6872 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6873 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6874 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6875 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6876 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6877 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6878 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6879 6880 static int 6881 qlnx_start_queues(qlnx_host_t *ha) 6882 { 6883 int rc, tc, i, vport_id = 0, 6884 drop_ttl0_flg = 1, vlan_removal_en = 1, 6885 tx_switching = 0, hw_lro_enable = 0; 6886 struct ecore_dev *cdev = &ha->cdev; 6887 struct ecore_rss_params *rss_params = &ha->rss_params; 6888 struct qlnx_update_vport_params vport_update_params; 6889 struct ifnet *ifp; 6890 struct ecore_hwfn *p_hwfn; 6891 struct ecore_sge_tpa_params tpa_params; 6892 struct ecore_queue_start_common_params qparams; 6893 struct qlnx_fastpath *fp; 6894 6895 ifp = ha->ifp; 6896 6897 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6898 6899 if (!ha->num_rss) { 6900 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6901 " are no Rx queues\n"); 6902 return -EINVAL; 6903 } 6904 6905 #ifndef QLNX_SOFT_LRO 6906 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6907 #endif /* #ifndef QLNX_SOFT_LRO */ 6908 6909 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6910 vlan_removal_en, tx_switching, hw_lro_enable); 6911 6912 if (rc) { 6913 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6914 return rc; 6915 } 6916 6917 QL_DPRINT2(ha, "Start vport ramrod passed, " 6918 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6919 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 6920 6921 for_each_rss(i) { 6922 struct ecore_rxq_start_ret_params rx_ret_params; 6923 struct ecore_txq_start_ret_params tx_ret_params; 6924 6925 fp = &ha->fp_array[i]; 6926 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6927 6928 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6929 bzero(&rx_ret_params, 6930 sizeof (struct ecore_rxq_start_ret_params)); 6931 6932 qparams.queue_id = i ; 6933 qparams.vport_id = vport_id; 6934 qparams.stats_id = vport_id; 6935 qparams.p_sb = fp->sb_info; 6936 qparams.sb_idx = RX_PI; 6937 6938 6939 rc = ecore_eth_rx_queue_start(p_hwfn, 6940 p_hwfn->hw_info.opaque_fid, 6941 &qparams, 6942 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6943 /* bd_chain_phys_addr */ 6944 fp->rxq->rx_bd_ring.p_phys_addr, 6945 /* cqe_pbl_addr */ 6946 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6947 /* cqe_pbl_size */ 6948 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6949 &rx_ret_params); 6950 6951 if (rc) { 6952 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6953 return rc; 6954 } 6955 6956 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6957 fp->rxq->handle = rx_ret_params.p_handle; 6958 fp->rxq->hw_cons_ptr = 6959 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6960 6961 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6962 6963 for (tc = 0; tc < ha->num_tc; tc++) { 6964 struct qlnx_tx_queue *txq = fp->txq[tc]; 6965 6966 bzero(&qparams, 6967 
sizeof(struct ecore_queue_start_common_params)); 6968 bzero(&tx_ret_params, 6969 sizeof (struct ecore_txq_start_ret_params)); 6970 6971 qparams.queue_id = txq->index / cdev->num_hwfns ; 6972 qparams.vport_id = vport_id; 6973 qparams.stats_id = vport_id; 6974 qparams.p_sb = fp->sb_info; 6975 qparams.sb_idx = TX_PI(tc); 6976 6977 rc = ecore_eth_tx_queue_start(p_hwfn, 6978 p_hwfn->hw_info.opaque_fid, 6979 &qparams, tc, 6980 /* bd_chain_phys_addr */ 6981 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6982 ecore_chain_get_page_cnt(&txq->tx_pbl), 6983 &tx_ret_params); 6984 6985 if (rc) { 6986 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6987 txq->index, rc); 6988 return rc; 6989 } 6990 6991 txq->doorbell_addr = tx_ret_params.p_doorbell; 6992 txq->handle = tx_ret_params.p_handle; 6993 6994 txq->hw_cons_ptr = 6995 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6996 SET_FIELD(txq->tx_db.data.params, 6997 ETH_DB_DATA_DEST, DB_DEST_XCM); 6998 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6999 DB_AGG_CMD_SET); 7000 SET_FIELD(txq->tx_db.data.params, 7001 ETH_DB_DATA_AGG_VAL_SEL, 7002 DQ_XCM_ETH_TX_BD_PROD_CMD); 7003 7004 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 7005 } 7006 } 7007 7008 /* Fill struct with RSS params */ 7009 if (ha->num_rss > 1) { 7010 7011 rss_params->update_rss_config = 1; 7012 rss_params->rss_enable = 1; 7013 rss_params->update_rss_capabilities = 1; 7014 rss_params->update_rss_ind_table = 1; 7015 rss_params->update_rss_key = 1; 7016 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 7017 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 7018 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 7019 7020 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 7021 fp = &ha->fp_array[(i % ha->num_rss)]; 7022 rss_params->rss_ind_table[i] = fp->rxq->handle; 7023 } 7024 7025 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 7026 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 7027 7028 } else { 7029 memset(rss_params, 0, sizeof(*rss_params)); 7030 } 7031 7032 7033 /* Prepare and send the vport enable */ 7034 memset(&vport_update_params, 0, sizeof(vport_update_params)); 7035 vport_update_params.vport_id = vport_id; 7036 vport_update_params.update_vport_active_tx_flg = 1; 7037 vport_update_params.vport_active_tx_flg = 1; 7038 vport_update_params.update_vport_active_rx_flg = 1; 7039 vport_update_params.vport_active_rx_flg = 1; 7040 vport_update_params.rss_params = rss_params; 7041 vport_update_params.update_inner_vlan_removal_flg = 1; 7042 vport_update_params.inner_vlan_removal_flg = 1; 7043 7044 if (hw_lro_enable) { 7045 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 7046 7047 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 7048 7049 tpa_params.update_tpa_en_flg = 1; 7050 tpa_params.tpa_ipv4_en_flg = 1; 7051 tpa_params.tpa_ipv6_en_flg = 1; 7052 7053 tpa_params.update_tpa_param_flg = 1; 7054 tpa_params.tpa_pkt_split_flg = 0; 7055 tpa_params.tpa_hdr_data_split_flg = 0; 7056 tpa_params.tpa_gro_consistent_flg = 0; 7057 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 7058 tpa_params.tpa_max_size = (uint16_t)(-1); 7059 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 7060 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 7061 7062 vport_update_params.sge_tpa_params = &tpa_params; 7063 } 7064 7065 rc = qlnx_update_vport(cdev, &vport_update_params); 7066 if (rc) { 7067 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 7068 return rc; 7069 } 7070 7071 return 0; 7072 } 7073 7074 static int 7075 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 7076 struct qlnx_tx_queue *txq) 
7077 { 7078 uint16_t hw_bd_cons; 7079 uint16_t ecore_cons_idx; 7080 7081 QL_DPRINT2(ha, "enter\n"); 7082 7083 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 7084 7085 while (hw_bd_cons != 7086 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 7087 7088 mtx_lock(&fp->tx_mtx); 7089 7090 (void)qlnx_tx_int(ha, fp, txq); 7091 7092 mtx_unlock(&fp->tx_mtx); 7093 7094 qlnx_mdelay(__func__, 2); 7095 7096 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 7097 } 7098 7099 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 7100 7101 return 0; 7102 } 7103 7104 static int 7105 qlnx_stop_queues(qlnx_host_t *ha) 7106 { 7107 struct qlnx_update_vport_params vport_update_params; 7108 struct ecore_dev *cdev; 7109 struct qlnx_fastpath *fp; 7110 int rc, tc, i; 7111 7112 cdev = &ha->cdev; 7113 7114 /* Disable the vport */ 7115 7116 memset(&vport_update_params, 0, sizeof(vport_update_params)); 7117 7118 vport_update_params.vport_id = 0; 7119 vport_update_params.update_vport_active_tx_flg = 1; 7120 vport_update_params.vport_active_tx_flg = 0; 7121 vport_update_params.update_vport_active_rx_flg = 1; 7122 vport_update_params.vport_active_rx_flg = 0; 7123 vport_update_params.rss_params = &ha->rss_params; 7124 vport_update_params.rss_params->update_rss_config = 0; 7125 vport_update_params.rss_params->rss_enable = 0; 7126 vport_update_params.update_inner_vlan_removal_flg = 0; 7127 vport_update_params.inner_vlan_removal_flg = 0; 7128 7129 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 7130 7131 rc = qlnx_update_vport(cdev, &vport_update_params); 7132 if (rc) { 7133 QL_DPRINT1(ha, "Failed to update vport\n"); 7134 return rc; 7135 } 7136 7137 /* Flush Tx queues. If needed, request drain from MCP */ 7138 for_each_rss(i) { 7139 fp = &ha->fp_array[i]; 7140 7141 for (tc = 0; tc < ha->num_tc; tc++) { 7142 struct qlnx_tx_queue *txq = fp->txq[tc]; 7143 7144 rc = qlnx_drain_txq(ha, fp, txq); 7145 if (rc) 7146 return rc; 7147 } 7148 } 7149 7150 /* Stop all Queues in reverse order*/ 7151 for (i = ha->num_rss - 1; i >= 0; i--) { 7152 7153 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 7154 7155 fp = &ha->fp_array[i]; 7156 7157 /* Stop the Tx Queue(s)*/ 7158 for (tc = 0; tc < ha->num_tc; tc++) { 7159 int tx_queue_id; 7160 7161 tx_queue_id = tc * ha->num_rss + i; 7162 rc = ecore_eth_tx_queue_stop(p_hwfn, 7163 fp->txq[tc]->handle); 7164 7165 if (rc) { 7166 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 7167 tx_queue_id); 7168 return rc; 7169 } 7170 } 7171 7172 /* Stop the Rx Queue*/ 7173 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 7174 false); 7175 if (rc) { 7176 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 7177 return rc; 7178 } 7179 } 7180 7181 /* Stop the vport */ 7182 for_each_hwfn(cdev, i) { 7183 7184 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 7185 7186 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 7187 7188 if (rc) { 7189 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 7190 return rc; 7191 } 7192 } 7193 7194 return rc; 7195 } 7196 7197 static int 7198 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 7199 enum ecore_filter_opcode opcode, 7200 unsigned char mac[ETH_ALEN]) 7201 { 7202 struct ecore_filter_ucast ucast; 7203 struct ecore_dev *cdev; 7204 int rc; 7205 7206 cdev = &ha->cdev; 7207 7208 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7209 7210 ucast.opcode = opcode; 7211 ucast.type = ECORE_FILTER_MAC; 7212 ucast.is_rx_filter = 1; 7213 ucast.vport_to_add_to = 0; 7214 memcpy(&ucast.mac[0], mac, ETH_ALEN); 7215 7216 rc = ecore_filter_ucast_cmd(cdev, &ucast, 
ECORE_SPQ_MODE_CB, NULL); 7217 7218 return (rc); 7219 } 7220 7221 static int 7222 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 7223 { 7224 struct ecore_filter_ucast ucast; 7225 struct ecore_dev *cdev; 7226 int rc; 7227 7228 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7229 7230 ucast.opcode = ECORE_FILTER_REPLACE; 7231 ucast.type = ECORE_FILTER_MAC; 7232 ucast.is_rx_filter = 1; 7233 7234 cdev = &ha->cdev; 7235 7236 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 7237 7238 return (rc); 7239 } 7240 7241 static int 7242 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 7243 { 7244 struct ecore_filter_mcast *mcast; 7245 struct ecore_dev *cdev; 7246 int rc, i; 7247 7248 cdev = &ha->cdev; 7249 7250 mcast = &ha->ecore_mcast; 7251 bzero(mcast, sizeof(struct ecore_filter_mcast)); 7252 7253 mcast->opcode = ECORE_FILTER_REMOVE; 7254 7255 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 7256 7257 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7258 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7259 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7260 7261 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7262 mcast->num_mc_addrs++; 7263 } 7264 } 7265 mcast = &ha->ecore_mcast; 7266 7267 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7268 7269 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7270 ha->nmcast = 0; 7271 7272 return (rc); 7273 } 7274 7275 static int 7276 qlnx_clean_filters(qlnx_host_t *ha) 7277 { 7278 int rc = 0; 7279 7280 /* Remove all unicast macs */ 7281 rc = qlnx_remove_all_ucast_mac(ha); 7282 if (rc) 7283 return rc; 7284 7285 /* Remove all multicast macs */ 7286 rc = qlnx_remove_all_mcast_mac(ha); 7287 if (rc) 7288 return rc; 7289 7290 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7291 7292 return (rc); 7293 } 7294 7295 static int 7296 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7297 { 7298 struct ecore_filter_accept_flags accept; 7299 int rc = 0; 7300 struct ecore_dev *cdev; 7301 7302 cdev = &ha->cdev; 7303 7304 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7305 7306 accept.update_rx_mode_config = 1; 7307 accept.rx_accept_filter = filter; 7308 7309 accept.update_tx_mode_config = 1; 7310 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7311 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7312 7313 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7314 ECORE_SPQ_MODE_CB, NULL); 7315 7316 return (rc); 7317 } 7318 7319 static int 7320 qlnx_set_rx_mode(qlnx_host_t *ha) 7321 { 7322 int rc = 0; 7323 uint8_t filter; 7324 7325 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7326 if (rc) 7327 return rc; 7328 7329 rc = qlnx_remove_all_mcast_mac(ha); 7330 if (rc) 7331 return rc; 7332 7333 filter = ECORE_ACCEPT_UCAST_MATCHED | 7334 ECORE_ACCEPT_MCAST_MATCHED | 7335 ECORE_ACCEPT_BCAST; 7336 7337 if (qlnx_vf_device(ha) == 0) { 7338 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7339 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7340 } 7341 ha->filter = filter; 7342 7343 rc = qlnx_set_rx_accept_filter(ha, filter); 7344 7345 return (rc); 7346 } 7347 7348 static int 7349 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7350 { 7351 int i, rc = 0; 7352 struct ecore_dev *cdev; 7353 struct ecore_hwfn *hwfn; 7354 struct ecore_ptt *ptt; 7355 7356 if (qlnx_vf_device(ha) == 0) 7357 return (0); 7358 7359 cdev = &ha->cdev; 7360 7361 for_each_hwfn(cdev, i) { 7362 7363 hwfn = &cdev->hwfns[i]; 7364 7365 ptt = ecore_ptt_acquire(hwfn); 7366 if (!ptt) 7367 return 
-EBUSY; 7368 7369 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7370 7371 ecore_ptt_release(hwfn, ptt); 7372 7373 if (rc) 7374 return rc; 7375 } 7376 return (rc); 7377 } 7378 7379 #if __FreeBSD_version >= 1100000 7380 static uint64_t 7381 qlnx_get_counter(if_t ifp, ift_counter cnt) 7382 { 7383 qlnx_host_t *ha; 7384 uint64_t count; 7385 7386 ha = (qlnx_host_t *)if_getsoftc(ifp); 7387 7388 switch (cnt) { 7389 7390 case IFCOUNTER_IPACKETS: 7391 count = ha->hw_stats.common.rx_ucast_pkts + 7392 ha->hw_stats.common.rx_mcast_pkts + 7393 ha->hw_stats.common.rx_bcast_pkts; 7394 break; 7395 7396 case IFCOUNTER_IERRORS: 7397 count = ha->hw_stats.common.rx_crc_errors + 7398 ha->hw_stats.common.rx_align_errors + 7399 ha->hw_stats.common.rx_oversize_packets + 7400 ha->hw_stats.common.rx_undersize_packets; 7401 break; 7402 7403 case IFCOUNTER_OPACKETS: 7404 count = ha->hw_stats.common.tx_ucast_pkts + 7405 ha->hw_stats.common.tx_mcast_pkts + 7406 ha->hw_stats.common.tx_bcast_pkts; 7407 break; 7408 7409 case IFCOUNTER_OERRORS: 7410 count = ha->hw_stats.common.tx_err_drop_pkts; 7411 break; 7412 7413 case IFCOUNTER_COLLISIONS: 7414 return (0); 7415 7416 case IFCOUNTER_IBYTES: 7417 count = ha->hw_stats.common.rx_ucast_bytes + 7418 ha->hw_stats.common.rx_mcast_bytes + 7419 ha->hw_stats.common.rx_bcast_bytes; 7420 break; 7421 7422 case IFCOUNTER_OBYTES: 7423 count = ha->hw_stats.common.tx_ucast_bytes + 7424 ha->hw_stats.common.tx_mcast_bytes + 7425 ha->hw_stats.common.tx_bcast_bytes; 7426 break; 7427 7428 case IFCOUNTER_IMCASTS: 7429 count = ha->hw_stats.common.rx_mcast_bytes; 7430 break; 7431 7432 case IFCOUNTER_OMCASTS: 7433 count = ha->hw_stats.common.tx_mcast_bytes; 7434 break; 7435 7436 case IFCOUNTER_IQDROPS: 7437 case IFCOUNTER_OQDROPS: 7438 case IFCOUNTER_NOPROTO: 7439 7440 default: 7441 return (if_get_counter_default(ifp, cnt)); 7442 } 7443 return (count); 7444 } 7445 #endif 7446 7447 7448 static void 7449 qlnx_timer(void *arg) 7450 { 7451 qlnx_host_t *ha; 7452 7453 ha = (qlnx_host_t *)arg; 7454 7455 if (ha->error_recovery) { 7456 ha->error_recovery = 0; 7457 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7458 return; 7459 } 7460 7461 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7462 7463 if (ha->storm_stats_gather) 7464 qlnx_sample_storm_stats(ha); 7465 7466 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7467 7468 return; 7469 } 7470 7471 static int 7472 qlnx_load(qlnx_host_t *ha) 7473 { 7474 int i; 7475 int rc = 0; 7476 struct ecore_dev *cdev; 7477 device_t dev; 7478 7479 cdev = &ha->cdev; 7480 dev = ha->pci_dev; 7481 7482 QL_DPRINT2(ha, "enter\n"); 7483 7484 rc = qlnx_alloc_mem_arrays(ha); 7485 if (rc) 7486 goto qlnx_load_exit0; 7487 7488 qlnx_init_fp(ha); 7489 7490 rc = qlnx_alloc_mem_load(ha); 7491 if (rc) 7492 goto qlnx_load_exit1; 7493 7494 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7495 ha->num_rss, ha->num_tc); 7496 7497 for (i = 0; i < ha->num_rss; i++) { 7498 7499 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7500 (INTR_TYPE_NET | INTR_MPSAFE), 7501 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7502 &ha->irq_vec[i].handle))) { 7503 7504 QL_DPRINT1(ha, "could not setup interrupt\n"); 7505 goto qlnx_load_exit2; 7506 } 7507 7508 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 7509 irq %p handle %p\n", i, 7510 ha->irq_vec[i].irq_rid, 7511 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7512 7513 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7514 } 7515 7516 rc = qlnx_start_queues(ha); 7517 if (rc) 7518 goto qlnx_load_exit2; 7519 7520 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ 
succeeded\n"); 7521 7522 /* Add primary mac and set Rx filters */ 7523 rc = qlnx_set_rx_mode(ha); 7524 if (rc) 7525 goto qlnx_load_exit2; 7526 7527 /* Ask for link-up using current configuration */ 7528 qlnx_set_link(ha, true); 7529 7530 if (qlnx_vf_device(ha) == 0) 7531 qlnx_link_update(&ha->cdev.hwfns[0]); 7532 7533 ha->state = QLNX_STATE_OPEN; 7534 7535 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7536 7537 if (ha->flags.callout_init) 7538 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7539 7540 goto qlnx_load_exit0; 7541 7542 qlnx_load_exit2: 7543 qlnx_free_mem_load(ha); 7544 7545 qlnx_load_exit1: 7546 ha->num_rss = 0; 7547 7548 qlnx_load_exit0: 7549 QL_DPRINT2(ha, "exit [%d]\n", rc); 7550 return rc; 7551 } 7552 7553 static void 7554 qlnx_drain_soft_lro(qlnx_host_t *ha) 7555 { 7556 #ifdef QLNX_SOFT_LRO 7557 7558 struct ifnet *ifp; 7559 int i; 7560 7561 ifp = ha->ifp; 7562 7563 7564 if (ifp->if_capenable & IFCAP_LRO) { 7565 7566 for (i = 0; i < ha->num_rss; i++) { 7567 7568 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7569 struct lro_ctrl *lro; 7570 7571 lro = &fp->rxq->lro; 7572 7573 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 7574 7575 tcp_lro_flush_all(lro); 7576 7577 #else 7578 struct lro_entry *queued; 7579 7580 while ((!SLIST_EMPTY(&lro->lro_active))){ 7581 queued = SLIST_FIRST(&lro->lro_active); 7582 SLIST_REMOVE_HEAD(&lro->lro_active, next); 7583 tcp_lro_flush(lro, queued); 7584 } 7585 7586 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 7587 7588 } 7589 } 7590 7591 #endif /* #ifdef QLNX_SOFT_LRO */ 7592 7593 return; 7594 } 7595 7596 static void 7597 qlnx_unload(qlnx_host_t *ha) 7598 { 7599 struct ecore_dev *cdev; 7600 device_t dev; 7601 int i; 7602 7603 cdev = &ha->cdev; 7604 dev = ha->pci_dev; 7605 7606 QL_DPRINT2(ha, "enter\n"); 7607 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7608 7609 if (ha->state == QLNX_STATE_OPEN) { 7610 7611 qlnx_set_link(ha, false); 7612 qlnx_clean_filters(ha); 7613 qlnx_stop_queues(ha); 7614 ecore_hw_stop_fastpath(cdev); 7615 7616 for (i = 0; i < ha->num_rss; i++) { 7617 if (ha->irq_vec[i].handle) { 7618 (void)bus_teardown_intr(dev, 7619 ha->irq_vec[i].irq, 7620 ha->irq_vec[i].handle); 7621 ha->irq_vec[i].handle = NULL; 7622 } 7623 } 7624 7625 qlnx_drain_fp_taskqueues(ha); 7626 qlnx_drain_soft_lro(ha); 7627 qlnx_free_mem_load(ha); 7628 } 7629 7630 if (ha->flags.callout_init) 7631 callout_drain(&ha->qlnx_callout); 7632 7633 qlnx_mdelay(__func__, 1000); 7634 7635 ha->state = QLNX_STATE_CLOSED; 7636 7637 QL_DPRINT2(ha, "exit\n"); 7638 return; 7639 } 7640 7641 static int 7642 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7643 { 7644 int rval = -1; 7645 struct ecore_hwfn *p_hwfn; 7646 struct ecore_ptt *p_ptt; 7647 7648 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7649 7650 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7651 p_ptt = ecore_ptt_acquire(p_hwfn); 7652 7653 if (!p_ptt) { 7654 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7655 return (rval); 7656 } 7657 7658 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7659 7660 if (rval == DBG_STATUS_OK) 7661 rval = 0; 7662 else { 7663 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7664 "[0x%x]\n", rval); 7665 } 7666 7667 ecore_ptt_release(p_hwfn, p_ptt); 7668 7669 return (rval); 7670 } 7671 7672 static int 7673 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7674 { 7675 int rval = -1; 7676 struct ecore_hwfn *p_hwfn; 7677 struct ecore_ptt *p_ptt; 7678 7679 
ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7680 7681 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7682 p_ptt = ecore_ptt_acquire(p_hwfn); 7683 7684 if (!p_ptt) { 7685 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7686 return (rval); 7687 } 7688 7689 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7690 7691 if (rval == DBG_STATUS_OK) 7692 rval = 0; 7693 else { 7694 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7695 " [0x%x]\n", rval); 7696 } 7697 7698 ecore_ptt_release(p_hwfn, p_ptt); 7699 7700 return (rval); 7701 } 7702 7703 7704 static void 7705 qlnx_sample_storm_stats(qlnx_host_t *ha) 7706 { 7707 int i, index; 7708 struct ecore_dev *cdev; 7709 qlnx_storm_stats_t *s_stats; 7710 uint32_t reg; 7711 struct ecore_ptt *p_ptt; 7712 struct ecore_hwfn *hwfn; 7713 7714 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7715 ha->storm_stats_gather = 0; 7716 return; 7717 } 7718 7719 cdev = &ha->cdev; 7720 7721 for_each_hwfn(cdev, i) { 7722 7723 hwfn = &cdev->hwfns[i]; 7724 7725 p_ptt = ecore_ptt_acquire(hwfn); 7726 if (!p_ptt) 7727 return; 7728 7729 index = ha->storm_stats_index + 7730 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7731 7732 s_stats = &ha->storm_stats[index]; 7733 7734 /* XSTORM */ 7735 reg = XSEM_REG_FAST_MEMORY + 7736 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7737 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7738 7739 reg = XSEM_REG_FAST_MEMORY + 7740 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7741 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7742 7743 reg = XSEM_REG_FAST_MEMORY + 7744 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7745 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7746 7747 reg = XSEM_REG_FAST_MEMORY + 7748 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7749 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7750 7751 /* YSTORM */ 7752 reg = YSEM_REG_FAST_MEMORY + 7753 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7754 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7755 7756 reg = YSEM_REG_FAST_MEMORY + 7757 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7758 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7759 7760 reg = YSEM_REG_FAST_MEMORY + 7761 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7762 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7763 7764 reg = YSEM_REG_FAST_MEMORY + 7765 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7766 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7767 7768 /* PSTORM */ 7769 reg = PSEM_REG_FAST_MEMORY + 7770 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7771 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7772 7773 reg = PSEM_REG_FAST_MEMORY + 7774 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7775 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7776 7777 reg = PSEM_REG_FAST_MEMORY + 7778 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7779 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7780 7781 reg = PSEM_REG_FAST_MEMORY + 7782 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7783 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7784 7785 /* TSTORM */ 7786 reg = TSEM_REG_FAST_MEMORY + 7787 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7788 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7789 7790 reg = TSEM_REG_FAST_MEMORY + 7791 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7792 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7793 7794 reg = TSEM_REG_FAST_MEMORY + 7795 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7796 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
7797 7798 reg = TSEM_REG_FAST_MEMORY + 7799 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7800 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7801 7802 /* MSTORM */ 7803 reg = MSEM_REG_FAST_MEMORY + 7804 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7805 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7806 7807 reg = MSEM_REG_FAST_MEMORY + 7808 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7809 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7810 7811 reg = MSEM_REG_FAST_MEMORY + 7812 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7813 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7814 7815 reg = MSEM_REG_FAST_MEMORY + 7816 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7817 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7818 7819 /* USTORM */ 7820 reg = USEM_REG_FAST_MEMORY + 7821 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7822 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7823 7824 reg = USEM_REG_FAST_MEMORY + 7825 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7826 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7827 7828 reg = USEM_REG_FAST_MEMORY + 7829 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7830 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7831 7832 reg = USEM_REG_FAST_MEMORY + 7833 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7834 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7835 7836 ecore_ptt_release(hwfn, p_ptt); 7837 } 7838 7839 ha->storm_stats_index++; 7840 7841 return; 7842 } 7843 7844 /* 7845 * Name: qlnx_dump_buf8 7846 * Function: dumps a buffer as bytes 7847 */ 7848 static void 7849 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7850 { 7851 device_t dev; 7852 uint32_t i = 0; 7853 uint8_t *buf; 7854 7855 dev = ha->pci_dev; 7856 buf = dbuf; 7857 7858 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7859 7860 while (len >= 16) { 7861 device_printf(dev,"0x%08x:" 7862 " %02x %02x %02x %02x %02x %02x %02x %02x" 7863 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7864 buf[0], buf[1], buf[2], buf[3], 7865 buf[4], buf[5], buf[6], buf[7], 7866 buf[8], buf[9], buf[10], buf[11], 7867 buf[12], buf[13], buf[14], buf[15]); 7868 i += 16; 7869 len -= 16; 7870 buf += 16; 7871 } 7872 switch (len) { 7873 case 1: 7874 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7875 break; 7876 case 2: 7877 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7878 break; 7879 case 3: 7880 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7881 i, buf[0], buf[1], buf[2]); 7882 break; 7883 case 4: 7884 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7885 buf[0], buf[1], buf[2], buf[3]); 7886 break; 7887 case 5: 7888 device_printf(dev,"0x%08x:" 7889 " %02x %02x %02x %02x %02x\n", i, 7890 buf[0], buf[1], buf[2], buf[3], buf[4]); 7891 break; 7892 case 6: 7893 device_printf(dev,"0x%08x:" 7894 " %02x %02x %02x %02x %02x %02x\n", i, 7895 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7896 break; 7897 case 7: 7898 device_printf(dev,"0x%08x:" 7899 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7900 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7901 break; 7902 case 8: 7903 device_printf(dev,"0x%08x:" 7904 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7905 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7906 buf[7]); 7907 break; 7908 case 9: 7909 device_printf(dev,"0x%08x:" 7910 " %02x %02x %02x %02x %02x %02x %02x %02x" 7911 " %02x\n", i, 7912 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7913 buf[7], buf[8]); 7914 break; 7915 case 10: 7916 
device_printf(dev,"0x%08x:" 7917 " %02x %02x %02x %02x %02x %02x %02x %02x" 7918 " %02x %02x\n", i, 7919 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7920 buf[7], buf[8], buf[9]); 7921 break; 7922 case 11: 7923 device_printf(dev,"0x%08x:" 7924 " %02x %02x %02x %02x %02x %02x %02x %02x" 7925 " %02x %02x %02x\n", i, 7926 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7927 buf[7], buf[8], buf[9], buf[10]); 7928 break; 7929 case 12: 7930 device_printf(dev,"0x%08x:" 7931 " %02x %02x %02x %02x %02x %02x %02x %02x" 7932 " %02x %02x %02x %02x\n", i, 7933 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7934 buf[7], buf[8], buf[9], buf[10], buf[11]); 7935 break; 7936 case 13: 7937 device_printf(dev,"0x%08x:" 7938 " %02x %02x %02x %02x %02x %02x %02x %02x" 7939 " %02x %02x %02x %02x %02x\n", i, 7940 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7941 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7942 break; 7943 case 14: 7944 device_printf(dev,"0x%08x:" 7945 " %02x %02x %02x %02x %02x %02x %02x %02x" 7946 " %02x %02x %02x %02x %02x %02x\n", i, 7947 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7948 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7949 buf[13]); 7950 break; 7951 case 15: 7952 device_printf(dev,"0x%08x:" 7953 " %02x %02x %02x %02x %02x %02x %02x %02x" 7954 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7955 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7956 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7957 buf[13], buf[14]); 7958 break; 7959 default: 7960 break; 7961 } 7962 7963 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7964 7965 return; 7966 } 7967 7968 #ifdef CONFIG_ECORE_SRIOV 7969 7970 static void 7971 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7972 { 7973 struct ecore_public_vf_info *vf_info; 7974 7975 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7976 7977 if (!vf_info) 7978 return; 7979 7980 /* Clear the VF mac */ 7981 memset(vf_info->forced_mac, 0, ETH_ALEN); 7982 7983 vf_info->forced_vlan = 0; 7984 7985 return; 7986 } 7987 7988 void 7989 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7990 { 7991 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7992 return; 7993 } 7994 7995 static int 7996 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7997 struct ecore_filter_ucast *params) 7998 { 7999 struct ecore_public_vf_info *vf; 8000 8001 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 8002 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 8003 "VF[%d] vport not initialized\n", vfid); 8004 return ECORE_INVAL; 8005 } 8006 8007 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 8008 if (!vf) 8009 return -EINVAL; 8010 8011 /* No real decision to make; Store the configured MAC */ 8012 if (params->type == ECORE_FILTER_MAC || 8013 params->type == ECORE_FILTER_MAC_VLAN) 8014 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 8015 8016 return 0; 8017 } 8018 8019 int 8020 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 8021 { 8022 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 8023 } 8024 8025 static int 8026 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 8027 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 8028 { 8029 uint8_t mask; 8030 struct ecore_filter_accept_flags *flags; 8031 8032 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 8033 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 8034 "VF[%d] vport not initialized\n", vfid); 8035 return ECORE_INVAL; 8036 } 8037 8038 /* Untrusted VFs can't 
even be trusted to know that fact. 8039 * Simply indicate everything is configured fine, and trace 8040 * configuration 'behind their back'. 8041 */ 8042 mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED; 8043 flags = ¶ms->accept_flags; 8044 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) 8045 return 0; 8046 8047 return 0; 8048 8049 } 8050 int 8051 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) 8052 { 8053 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); 8054 } 8055 8056 static int 8057 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) 8058 { 8059 int i; 8060 struct ecore_dev *cdev; 8061 8062 cdev = p_hwfn->p_dev; 8063 8064 for (i = 0; i < cdev->num_hwfns; i++) { 8065 if (&cdev->hwfns[i] == p_hwfn) 8066 break; 8067 } 8068 8069 if (i >= cdev->num_hwfns) 8070 return (-1); 8071 8072 return (i); 8073 } 8074 8075 static int 8076 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) 8077 { 8078 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 8079 int i; 8080 8081 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", 8082 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); 8083 8084 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8085 return (-1); 8086 8087 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8088 8089 atomic_testandset_32(&ha->sriov_task[i].flags, 8090 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); 8091 8092 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 8093 &ha->sriov_task[i].pf_task); 8094 8095 } 8096 8097 return (ECORE_SUCCESS); 8098 } 8099 8100 8101 int 8102 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) 8103 { 8104 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); 8105 } 8106 8107 static void 8108 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) 8109 { 8110 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 8111 int i; 8112 8113 if (!ha->sriov_initialized) 8114 return; 8115 8116 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 8117 ha, p_hwfn->p_dev, p_hwfn); 8118 8119 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8120 return; 8121 8122 8123 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8124 8125 atomic_testandset_32(&ha->sriov_task[i].flags, 8126 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); 8127 8128 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 8129 &ha->sriov_task[i].pf_task); 8130 } 8131 8132 return; 8133 } 8134 8135 8136 void 8137 qlnx_vf_flr_update(void *p_hwfn) 8138 { 8139 __qlnx_vf_flr_update(p_hwfn); 8140 8141 return; 8142 } 8143 8144 #ifndef QLNX_VF 8145 8146 static void 8147 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) 8148 { 8149 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 8150 int i; 8151 8152 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 8153 ha, p_hwfn->p_dev, p_hwfn); 8154 8155 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8156 return; 8157 8158 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", 8159 ha, p_hwfn->p_dev, p_hwfn, i); 8160 8161 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8162 8163 atomic_testandset_32(&ha->sriov_task[i].flags, 8164 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); 8165 8166 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 8167 &ha->sriov_task[i].pf_task); 8168 } 8169 } 8170 8171 static void 8172 qlnx_initialize_sriov(qlnx_host_t *ha) 8173 { 8174 device_t dev; 8175 nvlist_t *pf_schema, *vf_schema; 8176 int iov_error; 8177 8178 dev = ha->pci_dev; 8179 8180 pf_schema = pci_iov_schema_alloc_node(); 8181 vf_schema = pci_iov_schema_alloc_node(); 8182 8183 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 8184 pci_iov_schema_add_bool(vf_schema, 
"allow-set-mac", 8185 IOV_SCHEMA_HASDEFAULT, FALSE); 8186 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 8187 IOV_SCHEMA_HASDEFAULT, FALSE); 8188 pci_iov_schema_add_uint16(vf_schema, "num-queues", 8189 IOV_SCHEMA_HASDEFAULT, 1); 8190 8191 iov_error = pci_iov_attach(dev, pf_schema, vf_schema); 8192 8193 if (iov_error != 0) { 8194 ha->sriov_initialized = 0; 8195 } else { 8196 device_printf(dev, "SRIOV initialized\n"); 8197 ha->sriov_initialized = 1; 8198 } 8199 8200 return; 8201 } 8202 8203 static void 8204 qlnx_sriov_disable(qlnx_host_t *ha) 8205 { 8206 struct ecore_dev *cdev; 8207 int i, j; 8208 8209 cdev = &ha->cdev; 8210 8211 ecore_iov_set_vfs_to_disable(cdev, true); 8212 8213 8214 for_each_hwfn(cdev, i) { 8215 8216 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 8217 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8218 8219 if (!ptt) { 8220 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8221 return; 8222 } 8223 /* Clean WFQ db and configure equal weight for all vports */ 8224 ecore_clean_wfq_db(hwfn, ptt); 8225 8226 ecore_for_each_vf(hwfn, j) { 8227 int k = 0; 8228 8229 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 8230 continue; 8231 8232 if (ecore_iov_is_vf_started(hwfn, j)) { 8233 /* Wait until VF is disabled before releasing */ 8234 8235 for (k = 0; k < 100; k++) { 8236 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 8237 qlnx_mdelay(__func__, 10); 8238 } else 8239 break; 8240 } 8241 } 8242 8243 if (k < 100) 8244 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 8245 ptt, j); 8246 else { 8247 QL_DPRINT1(ha, 8248 "Timeout waiting for VF's FLR to end\n"); 8249 } 8250 } 8251 ecore_ptt_release(hwfn, ptt); 8252 } 8253 8254 ecore_iov_set_vfs_to_disable(cdev, false); 8255 8256 return; 8257 } 8258 8259 8260 static void 8261 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 8262 struct ecore_iov_vf_init_params *params) 8263 { 8264 u16 base, i; 8265 8266 /* Since we have an equal resource distribution per-VF, and we assume 8267 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 8268 * sequentially from there. 
8269 */ 8270 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; 8271 8272 params->rel_vf_id = vfid; 8273 8274 for (i = 0; i < params->num_queues; i++) { 8275 params->req_rx_queue[i] = base + i; 8276 params->req_tx_queue[i] = base + i; 8277 } 8278 8279 /* PF uses indices 0 for itself; Set vport/RSS afterwards */ 8280 params->vport_id = vfid + 1; 8281 params->rss_eng_id = vfid + 1; 8282 8283 return; 8284 } 8285 8286 static int 8287 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) 8288 { 8289 qlnx_host_t *ha; 8290 struct ecore_dev *cdev; 8291 struct ecore_iov_vf_init_params params; 8292 int ret, j, i; 8293 uint32_t max_vfs; 8294 8295 if ((ha = device_get_softc(dev)) == NULL) { 8296 device_printf(dev, "%s: cannot get softc\n", __func__); 8297 return (-1); 8298 } 8299 8300 if (qlnx_create_pf_taskqueues(ha) != 0) 8301 goto qlnx_iov_init_err0; 8302 8303 cdev = &ha->cdev; 8304 8305 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); 8306 8307 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", 8308 dev, num_vfs, max_vfs); 8309 8310 if (num_vfs >= max_vfs) { 8311 QL_DPRINT1(ha, "Can start at most %d VFs\n", 8312 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); 8313 goto qlnx_iov_init_err0; 8314 } 8315 8316 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, 8317 M_NOWAIT); 8318 8319 if (ha->vf_attr == NULL) 8320 goto qlnx_iov_init_err0; 8321 8322 8323 memset(¶ms, 0, sizeof(params)); 8324 8325 /* Initialize HW for VF access */ 8326 for_each_hwfn(cdev, j) { 8327 struct ecore_hwfn *hwfn = &cdev->hwfns[j]; 8328 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8329 8330 /* Make sure not to use more than 16 queues per VF */ 8331 params.num_queues = min_t(int, 8332 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 8333 16); 8334 8335 if (!ptt) { 8336 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8337 goto qlnx_iov_init_err1; 8338 } 8339 8340 for (i = 0; i < num_vfs; i++) { 8341 8342 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) 8343 continue; 8344 8345 qlnx_sriov_enable_qid_config(hwfn, i, ¶ms); 8346 8347 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, ¶ms); 8348 8349 if (ret) { 8350 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); 8351 ecore_ptt_release(hwfn, ptt); 8352 goto qlnx_iov_init_err1; 8353 } 8354 } 8355 8356 ecore_ptt_release(hwfn, ptt); 8357 } 8358 8359 ha->num_vfs = num_vfs; 8360 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); 8361 8362 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs); 8363 8364 return (0); 8365 8366 qlnx_iov_init_err1: 8367 qlnx_sriov_disable(ha); 8368 8369 qlnx_iov_init_err0: 8370 qlnx_destroy_pf_taskqueues(ha); 8371 ha->num_vfs = 0; 8372 8373 return (-1); 8374 } 8375 8376 static void 8377 qlnx_iov_uninit(device_t dev) 8378 { 8379 qlnx_host_t *ha; 8380 8381 if ((ha = device_get_softc(dev)) == NULL) { 8382 device_printf(dev, "%s: cannot get softc\n", __func__); 8383 return; 8384 } 8385 8386 QL_DPRINT2(ha," dev = %p enter\n", dev); 8387 8388 qlnx_sriov_disable(ha); 8389 qlnx_destroy_pf_taskqueues(ha); 8390 8391 free(ha->vf_attr, M_QLNXBUF); 8392 ha->vf_attr = NULL; 8393 8394 ha->num_vfs = 0; 8395 8396 QL_DPRINT2(ha," dev = %p exit\n", dev); 8397 return; 8398 } 8399 8400 static int 8401 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 8402 { 8403 qlnx_host_t *ha; 8404 qlnx_vf_attr_t *vf_attr; 8405 unsigned const char *mac; 8406 size_t size; 8407 struct ecore_hwfn *p_hwfn; 8408 8409 if ((ha = device_get_softc(dev)) == NULL) { 8410 device_printf(dev, "%s: cannot get softc\n", __func__); 
8411 return (-1); 8412 } 8413 8414 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8415 8416 if (vfnum > (ha->num_vfs - 1)) { 8417 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8418 vfnum, (ha->num_vfs - 1)); 8419 } 8420 8421 vf_attr = &ha->vf_attr[vfnum]; 8422 8423 if (nvlist_exists_binary(params, "mac-addr")) { 8424 mac = nvlist_get_binary(params, "mac-addr", &size); 8425 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8426 device_printf(dev, 8427 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8428 __func__, vf_attr->mac_addr[0], 8429 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8430 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8431 vf_attr->mac_addr[5]); 8432 p_hwfn = &ha->cdev.hwfns[0]; 8433 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8434 vfnum); 8435 } 8436 8437 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8438 return (0); 8439 } 8440 8441 static void 8442 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8443 { 8444 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8445 struct ecore_ptt *ptt; 8446 int i; 8447 8448 ptt = ecore_ptt_acquire(p_hwfn); 8449 if (!ptt) { 8450 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8451 __qlnx_pf_vf_msg(p_hwfn, 0); 8452 return; 8453 } 8454 8455 ecore_iov_pf_get_pending_events(p_hwfn, events); 8456 8457 QL_DPRINT2(ha, "Event mask of VF events:" 8458 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8459 events[0], events[1], events[2]); 8460 8461 ecore_for_each_vf(p_hwfn, i) { 8462 8463 /* Skip VFs with no pending messages */ 8464 if (!(events[i / 64] & (1ULL << (i % 64)))) 8465 continue; 8466 8467 QL_DPRINT2(ha, 8468 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8469 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8470 8471 /* Copy VF's message to PF's request buffer for that VF */ 8472 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8473 continue; 8474 8475 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8476 } 8477 8478 ecore_ptt_release(p_hwfn, ptt); 8479 8480 return; 8481 } 8482 8483 static void 8484 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8485 { 8486 struct ecore_ptt *ptt; 8487 int ret; 8488 8489 ptt = ecore_ptt_acquire(p_hwfn); 8490 8491 if (!ptt) { 8492 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8493 __qlnx_vf_flr_update(p_hwfn); 8494 return; 8495 } 8496 8497 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8498 8499 if (ret) { 8500 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8501 } 8502 8503 ecore_ptt_release(p_hwfn, ptt); 8504 8505 return; 8506 } 8507 8508 static void 8509 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8510 { 8511 struct ecore_ptt *ptt; 8512 int i; 8513 8514 ptt = ecore_ptt_acquire(p_hwfn); 8515 8516 if (!ptt) { 8517 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8518 qlnx_vf_bulleting_update(p_hwfn); 8519 return; 8520 } 8521 8522 ecore_for_each_vf(p_hwfn, i) { 8523 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8524 p_hwfn, i); 8525 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8526 } 8527 8528 ecore_ptt_release(p_hwfn, ptt); 8529 8530 return; 8531 } 8532 8533 static void 8534 qlnx_pf_taskqueue(void *context, int pending) 8535 { 8536 struct ecore_hwfn *p_hwfn; 8537 qlnx_host_t *ha; 8538 int i; 8539 8540 p_hwfn = context; 8541 8542 if (p_hwfn == NULL) 8543 return; 8544 8545 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8546 8547 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8548 return; 8549 8550 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8551 
QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) 8552 qlnx_handle_vf_msg(ha, p_hwfn); 8553 8554 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8555 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) 8556 qlnx_handle_vf_flr_update(ha, p_hwfn); 8557 8558 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8559 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) 8560 qlnx_handle_bulletin_update(ha, p_hwfn); 8561 8562 return; 8563 } 8564 8565 static int 8566 qlnx_create_pf_taskqueues(qlnx_host_t *ha) 8567 { 8568 int i; 8569 uint8_t tq_name[32]; 8570 8571 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8572 8573 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 8574 8575 bzero(tq_name, sizeof (tq_name)); 8576 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); 8577 8578 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); 8579 8580 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 8581 taskqueue_thread_enqueue, 8582 &ha->sriov_task[i].pf_taskqueue); 8583 8584 if (ha->sriov_task[i].pf_taskqueue == NULL) 8585 return (-1); 8586 8587 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, 8588 PI_NET, "%s", tq_name); 8589 8590 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); 8591 } 8592 8593 return (0); 8594 } 8595 8596 static void 8597 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) 8598 { 8599 int i; 8600 8601 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8602 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8603 taskqueue_drain(ha->sriov_task[i].pf_taskqueue, 8604 &ha->sriov_task[i].pf_task); 8605 taskqueue_free(ha->sriov_task[i].pf_taskqueue); 8606 ha->sriov_task[i].pf_taskqueue = NULL; 8607 } 8608 } 8609 return; 8610 } 8611 8612 static void 8613 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) 8614 { 8615 struct ecore_mcp_link_capabilities caps; 8616 struct ecore_mcp_link_params params; 8617 struct ecore_mcp_link_state link; 8618 int i; 8619 8620 if (!p_hwfn->pf_iov_info) 8621 return; 8622 8623 memset(¶ms, 0, sizeof(struct ecore_mcp_link_params)); 8624 memset(&link, 0, sizeof(struct ecore_mcp_link_state)); 8625 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); 8626 8627 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 8628 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); 8629 memcpy(¶ms, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); 8630 8631 QL_DPRINT2(ha, "called\n"); 8632 8633 /* Update bulletin of all future possible VFs with link configuration */ 8634 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 8635 8636 /* Modify link according to the VF's configured link state */ 8637 8638 link.link_up = false; 8639 8640 if (ha->link_up) { 8641 link.link_up = true; 8642 /* Set speed according to maximum supported by HW. 8643 * that is 40G for regular devices and 100G for CMT 8644 * mode devices. 8645 */ 8646 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 8647 100000 : link.speed; 8648 } 8649 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); 8650 ecore_iov_set_link(p_hwfn, i, ¶ms, &link, &caps); 8651 } 8652 8653 qlnx_vf_bulleting_update(p_hwfn); 8654 8655 return; 8656 } 8657 #endif /* #ifndef QLNX_VF */ 8658 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 8659
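/*
 * Usage note (illustrative sketch, not part of the driver): with
 * CONFIG_ECORE_SRIOV enabled, VFs are created through the FreeBSD pci_iov(9)
 * framework using the schema registered in qlnx_initialize_sriov() above
 * ("mac-addr", "allow-set-mac", "allow-promisc", "num-queues"). A minimal
 * iovctl.conf(5) sketch is shown below; the device name and MAC address are
 * assumptions and must match the actual PF instance on the target system.
 *
 *	PF {
 *		device : "qlnxe0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "02:00:00:00:00:01";
 *		allow-set-mac : true;
 *		num-queues : 2;
 *	}
 *
 * qlnx_iov_init() rejects num_vfs values that reach the ECORE_VPORT resource
 * count of the first HW function, and each VF is limited to at most 16 L2
 * queues (see the min_t() clamp in qlnx_iov_init()).
 */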