1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 29 /* 30 * File: qlnx_os.c 31 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "qlnx_os.h" 38 #include "bcm_osal.h" 39 #include "reg_addr.h" 40 #include "ecore_gtt_reg_addr.h" 41 #include "ecore.h" 42 #include "ecore_chain.h" 43 #include "ecore_status.h" 44 #include "ecore_hw.h" 45 #include "ecore_rt_defs.h" 46 #include "ecore_init_ops.h" 47 #include "ecore_int.h" 48 #include "ecore_cxt.h" 49 #include "ecore_spq.h" 50 #include "ecore_init_fw_funcs.h" 51 #include "ecore_sp_commands.h" 52 #include "ecore_dev_api.h" 53 #include "ecore_l2_api.h" 54 #include "ecore_mcp.h" 55 #include "ecore_hw_defs.h" 56 #include "mcp_public.h" 57 #include "ecore_iro.h" 58 #include "nvm_cfg.h" 59 #include "ecore_dev_api.h" 60 #include "ecore_dbg_fw_funcs.h" 61 #include "ecore_iov_api.h" 62 #include "ecore_vf_api.h" 63 64 #include "qlnx_ioctl.h" 65 #include "qlnx_def.h" 66 #include "qlnx_ver.h" 67 68 #ifdef QLNX_ENABLE_IWARP 69 #include "qlnx_rdma.h" 70 #endif /* #ifdef QLNX_ENABLE_IWARP */ 71 72 #include <sys/smp.h> 73 74 75 /* 76 * static functions 77 */ 78 /* 79 * ioctl related functions 80 */ 81 static void qlnx_add_sysctls(qlnx_host_t *ha); 82 83 /* 84 * main driver 85 */ 86 static void qlnx_release(qlnx_host_t *ha); 87 static void qlnx_fp_isr(void *arg); 88 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 89 static void qlnx_init(void *arg); 90 static void qlnx_init_locked(qlnx_host_t *ha); 91 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 92 static int qlnx_set_promisc(qlnx_host_t *ha); 93 static int qlnx_set_allmulti(qlnx_host_t *ha); 94 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 95 static int qlnx_media_change(struct ifnet *ifp); 96 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 97 static void qlnx_stop(qlnx_host_t *ha); 98 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 99 struct mbuf **m_headp); 100 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 101 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 102 struct 
qlnx_link_output *if_link); 103 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp); 104 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, 105 struct mbuf *mp); 106 static void qlnx_qflush(struct ifnet *ifp); 107 108 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 109 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 110 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 111 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 112 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 113 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 114 115 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 116 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 117 118 static int qlnx_nic_setup(struct ecore_dev *cdev, 119 struct ecore_pf_params *func_params); 120 static int qlnx_nic_start(struct ecore_dev *cdev); 121 static int qlnx_slowpath_start(qlnx_host_t *ha); 122 static int qlnx_slowpath_stop(qlnx_host_t *ha); 123 static int qlnx_init_hw(qlnx_host_t *ha); 124 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 125 char ver_str[VER_SIZE]); 126 static void qlnx_unload(qlnx_host_t *ha); 127 static int qlnx_load(qlnx_host_t *ha); 128 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 129 uint32_t add_mac); 130 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 131 uint32_t len); 132 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 133 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 134 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 135 struct qlnx_rx_queue *rxq); 136 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 137 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 138 int hwfn_index); 139 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 140 int hwfn_index); 141 static void qlnx_timer(void *arg); 142 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 143 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 144 static void qlnx_trigger_dump(qlnx_host_t *ha); 145 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 146 struct qlnx_tx_queue *txq); 147 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 148 struct qlnx_tx_queue *txq); 149 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 150 int lro_enable); 151 static void qlnx_fp_taskqueue(void *context, int pending); 152 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 153 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 154 struct qlnx_agg_info *tpa); 155 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 156 157 #if __FreeBSD_version >= 1100000 158 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 159 #endif 160 161 162 /* 163 * Hooks to the Operating Systems 164 */ 165 static int qlnx_pci_probe (device_t); 166 static int qlnx_pci_attach (device_t); 167 static int qlnx_pci_detach (device_t); 168 169 #ifndef QLNX_VF 170 171 #ifdef CONFIG_ECORE_SRIOV 172 173 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 174 static void qlnx_iov_uninit(device_t dev); 175 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 176 static void qlnx_initialize_sriov(qlnx_host_t *ha); 177 static void qlnx_pf_taskqueue(void *context, int pending); 178 static int 
qlnx_create_pf_taskqueues(qlnx_host_t *ha); 179 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha); 180 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha); 181 182 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 183 184 static device_method_t qlnx_pci_methods[] = { 185 /* Device interface */ 186 DEVMETHOD(device_probe, qlnx_pci_probe), 187 DEVMETHOD(device_attach, qlnx_pci_attach), 188 DEVMETHOD(device_detach, qlnx_pci_detach), 189 190 #ifdef CONFIG_ECORE_SRIOV 191 DEVMETHOD(pci_iov_init, qlnx_iov_init), 192 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit), 193 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf), 194 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 195 { 0, 0 } 196 }; 197 198 static driver_t qlnx_pci_driver = { 199 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 200 }; 201 202 static devclass_t qlnx_devclass; 203 204 MODULE_VERSION(if_qlnxe,1); 205 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0); 206 207 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 208 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 209 210 #else 211 212 static device_method_t qlnxv_pci_methods[] = { 213 /* Device interface */ 214 DEVMETHOD(device_probe, qlnx_pci_probe), 215 DEVMETHOD(device_attach, qlnx_pci_attach), 216 DEVMETHOD(device_detach, qlnx_pci_detach), 217 { 0, 0 } 218 }; 219 220 static driver_t qlnxv_pci_driver = { 221 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t), 222 }; 223 224 static devclass_t qlnxv_devclass; 225 MODULE_VERSION(if_qlnxev,1); 226 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0); 227 228 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1); 229 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1); 230 231 #endif /* #ifdef QLNX_VF */ 232 233 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 234 235 236 static char qlnx_dev_str[128]; 237 static char qlnx_ver_str[VER_SIZE]; 238 static char qlnx_name_str[NAME_SIZE]; 239 240 /* 241 * Some PCI Configuration Space Related Defines 242 */ 243 244 #ifndef PCI_VENDOR_QLOGIC 245 #define PCI_VENDOR_QLOGIC 0x1077 246 #endif 247 248 /* 40G Adapter QLE45xxx*/ 249 #ifndef QLOGIC_PCI_DEVICE_ID_1634 250 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 251 #endif 252 253 /* 100G Adapter QLE45xxx*/ 254 #ifndef QLOGIC_PCI_DEVICE_ID_1644 255 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 256 #endif 257 258 /* 25G Adapter QLE45xxx*/ 259 #ifndef QLOGIC_PCI_DEVICE_ID_1656 260 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 261 #endif 262 263 /* 50G Adapter QLE45xxx*/ 264 #ifndef QLOGIC_PCI_DEVICE_ID_1654 265 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 266 #endif 267 268 /* 10G/25G/40G Adapter QLE41xxx*/ 269 #ifndef QLOGIC_PCI_DEVICE_ID_8070 270 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 271 #endif 272 273 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/ 274 #ifndef QLOGIC_PCI_DEVICE_ID_8090 275 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090 276 #endif 277 278 279 280 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD, 0, "qlnxe driver parameters"); 281 282 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ 283 static int qlnxe_queue_count = QLNX_DEFAULT_RSS; 284 285 #if __FreeBSD_version < 1100000 286 287 TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count); 288 289 #endif 290 291 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 292 &qlnxe_queue_count, 0, "Multi-Queue queue count"); 293 294 295 /* 296 * Note on RDMA personality setting 297 * 298 * Read the personality configured in NVRAM 299 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and 300 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT 301 * use the personality 
in NVRAM. 302 303 * Otherwise use the personality configured in sysctl. 304 * 305 */ 306 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */ 307 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */ 308 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */ 309 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */ 310 #define QLNX_PERSONALITY_BITS_PER_FUNC 4 311 #define QLNX_PERSONALIY_MASK 0xF 312 313 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/ 314 static uint64_t qlnxe_rdma_configuration = 0x22222222; 315 316 #if __FreeBSD_version < 1100000 317 318 TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration); 319 320 SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 321 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 322 323 #else 324 325 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 326 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 327 328 #endif /* #if __FreeBSD_version < 1100000 */ 329 330 int 331 qlnx_vf_device(qlnx_host_t *ha) 332 { 333 uint16_t device_id; 334 335 device_id = ha->device_id; 336 337 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 338 return 0; 339 340 return -1; 341 } 342 343 static int 344 qlnx_valid_device(qlnx_host_t *ha) 345 { 346 uint16_t device_id; 347 348 device_id = ha->device_id; 349 350 #ifndef QLNX_VF 351 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 352 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 353 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 354 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 355 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 356 return 0; 357 #else 358 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 359 return 0; 360 361 #endif /* #ifndef QLNX_VF */ 362 return -1; 363 } 364 365 #ifdef QLNX_ENABLE_IWARP 366 static int 367 qlnx_rdma_supported(struct qlnx_host *ha) 368 { 369 uint16_t device_id; 370 371 device_id = pci_get_device(ha->pci_dev); 372 373 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 374 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 375 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 376 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 377 return (0); 378 379 return (-1); 380 } 381 #endif /* #ifdef QLNX_ENABLE_IWARP */ 382 383 /* 384 * Name: qlnx_pci_probe 385 * Function: Validate that the PCI device is a supported QLogic QLE45xxx/QLE41xxx device 386 */ 387 static int 388 qlnx_pci_probe(device_t dev) 389 { 390 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 391 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 392 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 393 394 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 395 return (ENXIO); 396 } 397 398 switch (pci_get_device(dev)) { 399 400 #ifndef QLNX_VF 401 402 case QLOGIC_PCI_DEVICE_ID_1644: 403 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 404 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 405 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 406 QLNX_VERSION_BUILD); 407 device_set_desc_copy(dev, qlnx_dev_str); 408 409 break; 410 411 case QLOGIC_PCI_DEVICE_ID_1634: 412 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 413 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 414 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 415 QLNX_VERSION_BUILD); 416 device_set_desc_copy(dev, qlnx_dev_str); 417 418 break; 419 420 case QLOGIC_PCI_DEVICE_ID_1656: 421 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 422 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 423 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 424 QLNX_VERSION_BUILD); 425 
device_set_desc_copy(dev, qlnx_dev_str); 426 427 break; 428 429 case QLOGIC_PCI_DEVICE_ID_1654: 430 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 431 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 432 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 433 QLNX_VERSION_BUILD); 434 device_set_desc_copy(dev, qlnx_dev_str); 435 436 break; 437 438 case QLOGIC_PCI_DEVICE_ID_8070: 439 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 440 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 441 " Adapter-Ethernet Function", 442 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 443 QLNX_VERSION_BUILD); 444 device_set_desc_copy(dev, qlnx_dev_str); 445 446 break; 447 448 #else 449 case QLOGIC_PCI_DEVICE_ID_8090: 450 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 451 "Qlogic SRIOV PCI CNA (AH) " 452 "Adapter-Ethernet Function", 453 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 454 QLNX_VERSION_BUILD); 455 device_set_desc_copy(dev, qlnx_dev_str); 456 457 break; 458 459 #endif /* #ifndef QLNX_VF */ 460 461 default: 462 return (ENXIO); 463 } 464 465 #ifdef QLNX_ENABLE_IWARP 466 qlnx_rdma_init(); 467 #endif /* #ifdef QLNX_ENABLE_IWARP */ 468 469 return (BUS_PROBE_DEFAULT); 470 } 471 472 static uint16_t 473 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 474 struct qlnx_tx_queue *txq) 475 { 476 u16 hw_bd_cons; 477 u16 ecore_cons_idx; 478 uint16_t diff; 479 480 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 481 482 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 483 if (hw_bd_cons < ecore_cons_idx) { 484 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 485 } else { 486 diff = hw_bd_cons - ecore_cons_idx; 487 } 488 return diff; 489 } 490 491 492 static void 493 qlnx_sp_intr(void *arg) 494 { 495 struct ecore_hwfn *p_hwfn; 496 qlnx_host_t *ha; 497 int i; 498 499 p_hwfn = arg; 500 501 if (p_hwfn == NULL) { 502 printf("%s: spurious slowpath intr\n", __func__); 503 return; 504 } 505 506 ha = (qlnx_host_t *)p_hwfn->p_dev; 507 508 QL_DPRINT2(ha, "enter\n"); 509 510 for (i = 0; i < ha->cdev.num_hwfns; i++) { 511 if (&ha->cdev.hwfns[i] == p_hwfn) { 512 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 513 break; 514 } 515 } 516 QL_DPRINT2(ha, "exit\n"); 517 518 return; 519 } 520 521 static void 522 qlnx_sp_taskqueue(void *context, int pending) 523 { 524 struct ecore_hwfn *p_hwfn; 525 526 p_hwfn = context; 527 528 if (p_hwfn != NULL) { 529 qlnx_sp_isr(p_hwfn); 530 } 531 return; 532 } 533 534 static int 535 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 536 { 537 int i; 538 uint8_t tq_name[32]; 539 540 for (i = 0; i < ha->cdev.num_hwfns; i++) { 541 542 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 543 544 bzero(tq_name, sizeof (tq_name)); 545 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 546 547 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 548 549 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 550 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 551 552 if (ha->sp_taskqueue[i] == NULL) 553 return (-1); 554 555 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 556 tq_name); 557 558 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 559 } 560 561 return (0); 562 } 563 564 static void 565 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 566 { 567 int i; 568 569 for (i = 0; i < ha->cdev.num_hwfns; i++) { 570 if (ha->sp_taskqueue[i] != NULL) { 571 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 572 taskqueue_free(ha->sp_taskqueue[i]); 573 } 574 } 575 return; 576 } 577 578 static void 579 qlnx_fp_taskqueue(void *context, int pending) 580 { 581 struct 
qlnx_fastpath *fp; 582 qlnx_host_t *ha; 583 struct ifnet *ifp; #ifdef QLNX_TRACE_PERF_DATA unsigned long tx_pkts = 0, tx_compl = 0; /* sampled around qlnx_transmit_locked() below */ #endif 584 585 fp = context; 586 587 if (fp == NULL) 588 return; 589 590 ha = (qlnx_host_t *)fp->edev; 591 592 ifp = ha->ifp; 593 594 if(ifp->if_drv_flags & IFF_DRV_RUNNING) { 595 596 if (!drbr_empty(ifp, fp->tx_br)) { 597 598 if(mtx_trylock(&fp->tx_mtx)) { 599 600 #ifdef QLNX_TRACE_PERF_DATA 601 tx_pkts = fp->tx_pkts_transmitted; 602 tx_compl = fp->tx_pkts_completed; 603 #endif 604 605 qlnx_transmit_locked(ifp, fp, NULL); 606 607 #ifdef QLNX_TRACE_PERF_DATA 608 fp->tx_pkts_trans_fp += 609 (fp->tx_pkts_transmitted - tx_pkts); 610 fp->tx_pkts_compl_fp += 611 (fp->tx_pkts_completed - tx_compl); 612 #endif 613 mtx_unlock(&fp->tx_mtx); 614 } 615 } 616 } 617 618 QL_DPRINT2(ha, "exit \n"); 619 return; 620 } 621 622 static int 623 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 624 { 625 int i; 626 uint8_t tq_name[32]; 627 struct qlnx_fastpath *fp; 628 629 for (i = 0; i < ha->num_rss; i++) { 630 631 fp = &ha->fp_array[i]; 632 633 bzero(tq_name, sizeof (tq_name)); 634 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 635 636 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 637 638 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 639 taskqueue_thread_enqueue, 640 &fp->fp_taskqueue); 641 642 if (fp->fp_taskqueue == NULL) 643 return (-1); 644 645 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 646 tq_name); 647 648 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 649 } 650 651 return (0); 652 } 653 654 static void 655 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 656 { 657 int i; 658 struct qlnx_fastpath *fp; 659 660 for (i = 0; i < ha->num_rss; i++) { 661 662 fp = &ha->fp_array[i]; 663 664 if (fp->fp_taskqueue != NULL) { 665 666 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 667 taskqueue_free(fp->fp_taskqueue); 668 fp->fp_taskqueue = NULL; 669 } 670 } 671 return; 672 } 673 674 static void 675 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 676 { 677 int i; 678 struct qlnx_fastpath *fp; 679 680 for (i = 0; i < ha->num_rss; i++) { 681 fp = &ha->fp_array[i]; 682 683 if (fp->fp_taskqueue != NULL) { 684 QLNX_UNLOCK(ha); 685 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 686 QLNX_LOCK(ha); 687 } 688 } 689 return; 690 } 691 692 static void 693 qlnx_get_params(qlnx_host_t *ha) 694 { 695 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 696 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 697 qlnxe_queue_count); 698 qlnxe_queue_count = 0; 699 } 700 return; 701 } 702 703 static void 704 qlnx_error_recovery_taskqueue(void *context, int pending) 705 { 706 qlnx_host_t *ha; 707 708 ha = context; 709 710 QL_DPRINT2(ha, "enter\n"); 711 712 QLNX_LOCK(ha); 713 qlnx_stop(ha); 714 QLNX_UNLOCK(ha); 715 716 #ifdef QLNX_ENABLE_IWARP 717 qlnx_rdma_dev_remove(ha); 718 #endif /* #ifdef QLNX_ENABLE_IWARP */ 719 720 qlnx_slowpath_stop(ha); 721 qlnx_slowpath_start(ha); 722 723 #ifdef QLNX_ENABLE_IWARP 724 qlnx_rdma_dev_add(ha); 725 #endif /* #ifdef QLNX_ENABLE_IWARP */ 726 727 qlnx_init(ha); 728 729 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 730 731 QL_DPRINT2(ha, "exit\n"); 732 733 return; 734 } 735 736 static int 737 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 738 { 739 uint8_t tq_name[32]; 740 741 bzero(tq_name, sizeof (tq_name)); 742 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 743 744 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 745 746 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 747 taskqueue_thread_enqueue, &ha->err_taskqueue); 748 749 750 if (ha->err_taskqueue == 
NULL) 751 return (-1); 752 753 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 754 755 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 756 757 return (0); 758 } 759 760 static void 761 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 762 { 763 if (ha->err_taskqueue != NULL) { 764 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 765 taskqueue_free(ha->err_taskqueue); 766 } 767 768 ha->err_taskqueue = NULL; 769 770 return; 771 } 772 773 /* 774 * Name: qlnx_pci_attach 775 * Function: attaches the device to the operating system 776 */ 777 static int 778 qlnx_pci_attach(device_t dev) 779 { 780 qlnx_host_t *ha = NULL; 781 uint32_t rsrc_len_reg = 0; 782 uint32_t rsrc_len_dbells = 0; 783 uint32_t rsrc_len_msix = 0; 784 int i; 785 uint32_t mfw_ver; 786 uint32_t num_sp_msix = 0; 787 uint32_t num_rdma_irqs = 0; 788 789 if ((ha = device_get_softc(dev)) == NULL) { 790 device_printf(dev, "cannot get softc\n"); 791 return (ENOMEM); 792 } 793 794 memset(ha, 0, sizeof (qlnx_host_t)); 795 796 ha->device_id = pci_get_device(dev); 797 798 if (qlnx_valid_device(ha) != 0) { 799 device_printf(dev, "device is not valid device\n"); 800 return (ENXIO); 801 } 802 ha->pci_func = pci_get_function(dev); 803 804 ha->pci_dev = dev; 805 806 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 807 808 ha->flags.lock_init = 1; 809 810 pci_enable_busmaster(dev); 811 812 /* 813 * map the PCI BARs 814 */ 815 816 ha->reg_rid = PCIR_BAR(0); 817 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 818 RF_ACTIVE); 819 820 if (ha->pci_reg == NULL) { 821 device_printf(dev, "unable to map BAR0\n"); 822 goto qlnx_pci_attach_err; 823 } 824 825 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 826 ha->reg_rid); 827 828 ha->dbells_rid = PCIR_BAR(2); 829 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, 830 SYS_RES_MEMORY, 831 ha->dbells_rid); 832 if (rsrc_len_dbells) { 833 834 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 835 &ha->dbells_rid, RF_ACTIVE); 836 837 if (ha->pci_dbells == NULL) { 838 device_printf(dev, "unable to map BAR1\n"); 839 goto qlnx_pci_attach_err; 840 } 841 ha->dbells_phys_addr = (uint64_t) 842 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 843 844 ha->dbells_size = rsrc_len_dbells; 845 } else { 846 if (qlnx_vf_device(ha) != 0) { 847 device_printf(dev, " BAR1 size is zero\n"); 848 goto qlnx_pci_attach_err; 849 } 850 } 851 852 ha->msix_rid = PCIR_BAR(4); 853 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 854 &ha->msix_rid, RF_ACTIVE); 855 856 if (ha->msix_bar == NULL) { 857 device_printf(dev, "unable to map BAR2\n"); 858 goto qlnx_pci_attach_err; 859 } 860 861 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 862 ha->msix_rid); 863 864 ha->dbg_level = 0x0000; 865 866 QL_DPRINT1(ha, "\n\t\t\t" 867 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 868 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 869 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 870 " msix_avail = 0x%x " 871 "\n\t\t\t[ncpus = %d]\n", 872 ha->pci_dev, ha->pci_reg, rsrc_len_reg, 873 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 874 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 875 mp_ncpus); 876 /* 877 * allocate dma tags 878 */ 879 880 if (qlnx_alloc_parent_dma_tag(ha)) 881 goto qlnx_pci_attach_err; 882 883 if (qlnx_alloc_tx_dma_tag(ha)) 884 goto qlnx_pci_attach_err; 885 886 if (qlnx_alloc_rx_dma_tag(ha)) 887 goto qlnx_pci_attach_err; 
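/*
 * Note (added): qlnx_init_hw() below wires the mapped BARs into the ecore
 * device (regview, doorbells), selects the PCI personality and calls
 * ecore_hw_prepare(); it must succeed before the MSI-X vectors are sized
 * and allocated further down in this attach routine.
 */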
888 889 890 if (qlnx_init_hw(ha) != 0) 891 goto qlnx_pci_attach_err; 892 893 ha->flags.hw_init = 1; 894 895 qlnx_get_params(ha); 896 897 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && 898 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { 899 qlnxe_queue_count = QLNX_MAX_RSS; 900 } 901 902 /* 903 * Allocate MSI-x vectors 904 */ 905 if (qlnx_vf_device(ha) != 0) { 906 907 if (qlnxe_queue_count == 0) 908 ha->num_rss = QLNX_DEFAULT_RSS; 909 else 910 ha->num_rss = qlnxe_queue_count; 911 912 num_sp_msix = ha->cdev.num_hwfns; 913 } else { 914 uint8_t max_rxq; 915 uint8_t max_txq; 916 917 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 918 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 919 920 if (max_rxq < max_txq) 921 ha->num_rss = max_rxq; 922 else 923 ha->num_rss = max_txq; 924 925 if (ha->num_rss > QLNX_MAX_VF_RSS) 926 ha->num_rss = QLNX_MAX_VF_RSS; 927 928 num_sp_msix = 0; 929 } 930 931 if (ha->num_rss > mp_ncpus) 932 ha->num_rss = mp_ncpus; 933 934 ha->num_tc = QLNX_MAX_TC; 935 936 ha->msix_count = pci_msix_count(dev); 937 938 #ifdef QLNX_ENABLE_IWARP 939 940 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 941 942 #endif /* #ifdef QLNX_ENABLE_IWARP */ 943 944 if (!ha->msix_count || 945 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 946 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 947 ha->msix_count); 948 goto qlnx_pci_attach_err; 949 } 950 951 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 952 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 953 else 954 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 955 956 QL_DPRINT1(ha, "\n\t\t\t" 957 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 958 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 959 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 960 " msix_avail = 0x%x msix_alloc = 0x%x" 961 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 962 ha->pci_reg, rsrc_len_reg, 963 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 964 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 965 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 966 967 if (pci_alloc_msix(dev, &ha->msix_count)) { 968 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 969 ha->msix_count); 970 ha->msix_count = 0; 971 goto qlnx_pci_attach_err; 972 } 973 974 /* 975 * Initialize slow path interrupt and task queue 976 */ 977 978 if (num_sp_msix) { 979 980 if (qlnx_create_sp_taskqueues(ha) != 0) 981 goto qlnx_pci_attach_err; 982 983 for (i = 0; i < ha->cdev.num_hwfns; i++) { 984 985 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 986 987 ha->sp_irq_rid[i] = i + 1; 988 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 989 &ha->sp_irq_rid[i], 990 (RF_ACTIVE | RF_SHAREABLE)); 991 if (ha->sp_irq[i] == NULL) { 992 device_printf(dev, 993 "could not allocate mbx interrupt\n"); 994 goto qlnx_pci_attach_err; 995 } 996 997 if (bus_setup_intr(dev, ha->sp_irq[i], 998 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 999 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 1000 device_printf(dev, 1001 "could not setup slow path interrupt\n"); 1002 goto qlnx_pci_attach_err; 1003 } 1004 1005 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 1006 " sp_irq %p sp_handle %p\n", p_hwfn, 1007 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 1008 } 1009 } 1010 1011 /* 1012 * initialize fast path interrupt 1013 */ 1014 if (qlnx_create_fp_taskqueues(ha) != 0) 1015 goto qlnx_pci_attach_err; 1016 1017 for (i = 0; i < ha->num_rss; i++) { 1018 ha->irq_vec[i].rss_idx = i; 1019 
ha->irq_vec[i].ha = ha; 1020 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; 1021 1022 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1023 &ha->irq_vec[i].irq_rid, 1024 (RF_ACTIVE | RF_SHAREABLE)); 1025 1026 if (ha->irq_vec[i].irq == NULL) { 1027 device_printf(dev, 1028 "could not allocate interrupt[%d] irq_rid = %d\n", 1029 i, ha->irq_vec[i].irq_rid); 1030 goto qlnx_pci_attach_err; 1031 } 1032 1033 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 1034 device_printf(dev, "could not allocate tx_br[%d]\n", i); 1035 goto qlnx_pci_attach_err; 1036 1037 } 1038 } 1039 1040 1041 if (qlnx_vf_device(ha) != 0) { 1042 1043 callout_init(&ha->qlnx_callout, 1); 1044 ha->flags.callout_init = 1; 1045 1046 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1047 1048 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1049 goto qlnx_pci_attach_err; 1050 if (ha->grcdump_size[i] == 0) 1051 goto qlnx_pci_attach_err; 1052 1053 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1054 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1055 i, ha->grcdump_size[i]); 1056 1057 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1058 if (ha->grcdump[i] == NULL) { 1059 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1060 goto qlnx_pci_attach_err; 1061 } 1062 1063 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1064 goto qlnx_pci_attach_err; 1065 if (ha->idle_chk_size[i] == 0) 1066 goto qlnx_pci_attach_err; 1067 1068 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1069 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1070 i, ha->idle_chk_size[i]); 1071 1072 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1073 1074 if (ha->idle_chk[i] == NULL) { 1075 device_printf(dev, "idle_chk alloc failed\n"); 1076 goto qlnx_pci_attach_err; 1077 } 1078 } 1079 1080 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1081 goto qlnx_pci_attach_err; 1082 } 1083 1084 if (qlnx_slowpath_start(ha) != 0) 1085 goto qlnx_pci_attach_err; 1086 else 1087 ha->flags.slowpath_start = 1; 1088 1089 if (qlnx_vf_device(ha) != 0) { 1090 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1091 qlnx_mdelay(__func__, 1000); 1092 qlnx_trigger_dump(ha); 1093 1094 goto qlnx_pci_attach_err0; 1095 } 1096 1097 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1098 qlnx_mdelay(__func__, 1000); 1099 qlnx_trigger_dump(ha); 1100 1101 goto qlnx_pci_attach_err0; 1102 } 1103 } else { 1104 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1105 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1106 } 1107 1108 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1109 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1110 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1111 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1112 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1113 FW_ENGINEERING_VERSION); 1114 1115 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1116 ha->stormfw_ver, ha->mfw_ver); 1117 1118 qlnx_init_ifnet(dev, ha); 1119 1120 /* 1121 * add sysctls 1122 */ 1123 qlnx_add_sysctls(ha); 1124 1125 qlnx_pci_attach_err0: 1126 /* 1127 * create ioctl device interface 1128 */ 1129 if (qlnx_vf_device(ha) != 0) { 1130 1131 if (qlnx_make_cdev(ha)) { 1132 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1133 goto qlnx_pci_attach_err; 1134 } 1135 1136 #ifdef QLNX_ENABLE_IWARP 1137 qlnx_rdma_dev_add(ha); 1138 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1139 } 1140 1141 #ifndef QLNX_VF 1142 #ifdef CONFIG_ECORE_SRIOV 1143 1144 if (qlnx_vf_device(ha) != 0) 1145 qlnx_initialize_sriov(ha); 1146 1147 #endif /* #ifdef 
CONFIG_ECORE_SRIOV */ 1148 #endif /* #ifdef QLNX_VF */ 1149 1150 QL_DPRINT2(ha, "success\n"); 1151 1152 return (0); 1153 1154 qlnx_pci_attach_err: 1155 1156 qlnx_release(ha); 1157 1158 return (ENXIO); 1159 } 1160 1161 /* 1162 * Name: qlnx_pci_detach 1163 * Function: Unhooks the device from the operating system 1164 */ 1165 static int 1166 qlnx_pci_detach(device_t dev) 1167 { 1168 qlnx_host_t *ha = NULL; 1169 1170 if ((ha = device_get_softc(dev)) == NULL) { 1171 device_printf(dev, "%s: cannot get softc\n", __func__); 1172 return (ENOMEM); 1173 } 1174 1175 if (qlnx_vf_device(ha) != 0) { 1176 #ifdef CONFIG_ECORE_SRIOV 1177 int ret; 1178 1179 ret = pci_iov_detach(dev); 1180 if (ret) { 1181 device_printf(dev, "%s: SRIOV in use\n", __func__); 1182 return (ret); 1183 } 1184 1185 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1186 1187 #ifdef QLNX_ENABLE_IWARP 1188 if (qlnx_rdma_dev_remove(ha) != 0) 1189 return (EBUSY); 1190 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1191 } 1192 1193 QLNX_LOCK(ha); 1194 qlnx_stop(ha); 1195 QLNX_UNLOCK(ha); 1196 1197 qlnx_release(ha); 1198 1199 return (0); 1200 } 1201 1202 #ifdef QLNX_ENABLE_IWARP 1203 1204 static uint8_t 1205 qlnx_get_personality(uint8_t pci_func) 1206 { 1207 uint8_t personality; 1208 1209 personality = (qlnxe_rdma_configuration >> 1210 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & 1211 QLNX_PERSONALIY_MASK; 1212 return (personality); 1213 } 1214 1215 static void 1216 qlnx_set_personality(qlnx_host_t *ha) 1217 { 1218 struct ecore_hwfn *p_hwfn; 1219 uint8_t personality; 1220 1221 p_hwfn = &ha->cdev.hwfns[0]; 1222 1223 personality = qlnx_get_personality(ha->pci_func); 1224 1225 switch (personality) { 1226 1227 case QLNX_PERSONALITY_DEFAULT: 1228 device_printf(ha->pci_dev, "%s: DEFAULT\n", 1229 __func__); 1230 ha->personality = ECORE_PCI_DEFAULT; 1231 break; 1232 1233 case QLNX_PERSONALITY_ETH_ONLY: 1234 device_printf(ha->pci_dev, "%s: ETH_ONLY\n", 1235 __func__); 1236 ha->personality = ECORE_PCI_ETH; 1237 break; 1238 1239 case QLNX_PERSONALITY_ETH_IWARP: 1240 device_printf(ha->pci_dev, "%s: ETH_IWARP\n", 1241 __func__); 1242 ha->personality = ECORE_PCI_ETH_IWARP; 1243 break; 1244 1245 case QLNX_PERSONALITY_ETH_ROCE: 1246 device_printf(ha->pci_dev, "%s: ETH_ROCE\n", 1247 __func__); 1248 ha->personality = ECORE_PCI_ETH_ROCE; 1249 break; 1250 } 1251 1252 return; 1253 } 1254 1255 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1256 1257 static int 1258 qlnx_init_hw(qlnx_host_t *ha) 1259 { 1260 int rval = 0; 1261 struct ecore_hw_prepare_params params; 1262 1263 ecore_init_struct(&ha->cdev); 1264 1265 /* ha->dp_module = ECORE_MSG_PROBE | 1266 ECORE_MSG_INTR | 1267 ECORE_MSG_SP | 1268 ECORE_MSG_LINK | 1269 ECORE_MSG_SPQ | 1270 ECORE_MSG_RDMA; 1271 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 1272 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; 1273 ha->dp_level = ECORE_LEVEL_NOTICE; 1274 //ha->dp_level = ECORE_LEVEL_VERBOSE; 1275 1276 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 1277 1278 ha->cdev.regview = ha->pci_reg; 1279 1280 ha->personality = ECORE_PCI_DEFAULT; 1281 1282 if (qlnx_vf_device(ha) == 0) { 1283 ha->cdev.b_is_vf = true; 1284 1285 if (ha->pci_dbells != NULL) { 1286 ha->cdev.doorbells = ha->pci_dbells; 1287 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1288 ha->cdev.db_size = ha->dbells_size; 1289 } else { 1290 ha->pci_dbells = ha->pci_reg; 1291 } 1292 } else { 1293 ha->cdev.doorbells = ha->pci_dbells; 1294 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1295 ha->cdev.db_size = ha->dbells_size; 1296 1297 #ifdef QLNX_ENABLE_IWARP 
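/*
 * Note (added): on RDMA-capable PFs the NVRAM personality may be overridden
 * from the hw.qlnxe.rdma_configuration tunable; each PCI function owns 4 bits
 * of the 64-bit value (QLNX_PERSONALITY_BITS_PER_FUNC). For example, the
 * default 0x22222222 selects 0x2 (ETH_IWARP) for PCI functions 0 through 7.
 */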
1298 1299 if (qlnx_rdma_supported(ha) == 0) 1300 qlnx_set_personality(ha); 1301 1302 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1303 1304 } 1305 QL_DPRINT2(ha, "%s: %s\n", __func__, 1306 (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet")); 1307 1308 bzero(&params, sizeof (struct ecore_hw_prepare_params)); 1309 1310 params.personality = ha->personality; 1311 1312 params.drv_resc_alloc = false; 1313 params.chk_reg_fifo = false; 1314 params.initiate_pf_flr = true; 1315 params.epoch = 0; 1316 1317 ecore_hw_prepare(&ha->cdev, &params); 1318 1319 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 1320 1321 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n", 1322 ha, &ha->cdev, &ha->cdev.hwfns[0]); 1323 1324 return (rval); 1325 } 1326 1327 static void 1328 qlnx_release(qlnx_host_t *ha) 1329 { 1330 device_t dev; 1331 int i; 1332 1333 dev = ha->pci_dev; 1334 1335 QL_DPRINT2(ha, "enter\n"); 1336 1337 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 1338 if (ha->idle_chk[i] != NULL) { 1339 free(ha->idle_chk[i], M_QLNXBUF); 1340 ha->idle_chk[i] = NULL; 1341 } 1342 1343 if (ha->grcdump[i] != NULL) { 1344 free(ha->grcdump[i], M_QLNXBUF); 1345 ha->grcdump[i] = NULL; 1346 } 1347 } 1348 1349 if (ha->flags.callout_init) 1350 callout_drain(&ha->qlnx_callout); 1351 1352 if (ha->flags.slowpath_start) { 1353 qlnx_slowpath_stop(ha); 1354 } 1355 1356 if (ha->flags.hw_init) 1357 ecore_hw_remove(&ha->cdev); 1358 1359 qlnx_del_cdev(ha); 1360 1361 if (ha->ifp != NULL) 1362 ether_ifdetach(ha->ifp); 1363 1364 qlnx_free_tx_dma_tag(ha); 1365 1366 qlnx_free_rx_dma_tag(ha); 1367 1368 qlnx_free_parent_dma_tag(ha); 1369 1370 if (qlnx_vf_device(ha) != 0) { 1371 qlnx_destroy_error_recovery_taskqueue(ha); 1372 } 1373 1374 for (i = 0; i < ha->num_rss; i++) { 1375 struct qlnx_fastpath *fp = &ha->fp_array[i]; 1376 1377 if (ha->irq_vec[i].handle) { 1378 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 1379 ha->irq_vec[i].handle); 1380 } 1381 1382 if (ha->irq_vec[i].irq) { 1383 (void)bus_release_resource(dev, SYS_RES_IRQ, 1384 ha->irq_vec[i].irq_rid, 1385 ha->irq_vec[i].irq); 1386 } 1387 1388 qlnx_free_tx_br(ha, fp); 1389 } 1390 qlnx_destroy_fp_taskqueues(ha); 1391 1392 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1393 if (ha->sp_handle[i]) 1394 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1395 ha->sp_handle[i]); 1396 1397 if (ha->sp_irq[i]) 1398 (void) bus_release_resource(dev, SYS_RES_IRQ, 1399 ha->sp_irq_rid[i], ha->sp_irq[i]); 1400 } 1401 1402 qlnx_destroy_sp_taskqueues(ha); 1403 1404 if (ha->msix_count) 1405 pci_release_msi(dev); 1406 1407 if (ha->flags.lock_init) { 1408 mtx_destroy(&ha->hw_lock); 1409 } 1410 1411 if (ha->pci_reg) 1412 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1413 ha->pci_reg); 1414 1415 if (ha->dbells_size && ha->pci_dbells) 1416 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1417 ha->pci_dbells); 1418 1419 if (ha->msix_bar) 1420 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1421 ha->msix_bar); 1422 1423 QL_DPRINT2(ha, "exit\n"); 1424 return; 1425 } 1426 1427 static void 1428 qlnx_trigger_dump(qlnx_host_t *ha) 1429 { 1430 int i; 1431 1432 if (ha->ifp != NULL) 1433 ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 1434 1435 QL_DPRINT2(ha, "enter\n"); 1436 1437 if (qlnx_vf_device(ha) == 0) 1438 return; 1439 1440 ha->error_recovery = 1; 1441 1442 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1443 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1444 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1445 } 1446 1447 QL_DPRINT2(ha, "exit\n"); 1448 1449 return; 1450 }
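/*
 * Note (added): sysctl handler below; writing 1 to the node invokes
 * qlnx_trigger_dump(), which marks the interface down and captures a GRC
 * dump and idle-check for every hw function.
 */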
1451 1452 static int 1453 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1454 { 1455 int err, ret = 0; 1456 qlnx_host_t *ha; 1457 1458 err = sysctl_handle_int(oidp, &ret, 0, req); 1459 1460 if (err || !req->newptr) 1461 return (err); 1462 1463 if (ret == 1) { 1464 ha = (qlnx_host_t *)arg1; 1465 qlnx_trigger_dump(ha); 1466 } 1467 return (err); 1468 } 1469 1470 static int 1471 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1472 { 1473 int err, i, ret = 0, usecs = 0; 1474 qlnx_host_t *ha; 1475 struct ecore_hwfn *p_hwfn; 1476 struct qlnx_fastpath *fp; 1477 1478 err = sysctl_handle_int(oidp, &usecs, 0, req); 1479 1480 if (err || !req->newptr || !usecs || (usecs > 255)) 1481 return (err); 1482 1483 ha = (qlnx_host_t *)arg1; 1484 1485 if (qlnx_vf_device(ha) == 0) 1486 return (-1); 1487 1488 for (i = 0; i < ha->num_rss; i++) { 1489 1490 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1491 1492 fp = &ha->fp_array[i]; 1493 1494 if (fp->txq[0]->handle != NULL) { 1495 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1496 (uint16_t)usecs, fp->txq[0]->handle); 1497 } 1498 } 1499 1500 if (!ret) 1501 ha->tx_coalesce_usecs = (uint8_t)usecs; 1502 1503 return (err); 1504 } 1505 1506 static int 1507 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1508 { 1509 int err, i, ret = 0, usecs = 0; 1510 qlnx_host_t *ha; 1511 struct ecore_hwfn *p_hwfn; 1512 struct qlnx_fastpath *fp; 1513 1514 err = sysctl_handle_int(oidp, &usecs, 0, req); 1515 1516 if (err || !req->newptr || !usecs || (usecs > 255)) 1517 return (err); 1518 1519 ha = (qlnx_host_t *)arg1; 1520 1521 if (qlnx_vf_device(ha) == 0) 1522 return (-1); 1523 1524 for (i = 0; i < ha->num_rss; i++) { 1525 1526 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1527 1528 fp = &ha->fp_array[i]; 1529 1530 if (fp->rxq->handle != NULL) { 1531 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1532 0, fp->rxq->handle); 1533 } 1534 } 1535 1536 if (!ret) 1537 ha->rx_coalesce_usecs = (uint8_t)usecs; 1538 1539 return (err); 1540 } 1541 1542 static void 1543 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1544 { 1545 struct sysctl_ctx_list *ctx; 1546 struct sysctl_oid_list *children; 1547 struct sysctl_oid *ctx_oid; 1548 1549 ctx = device_get_sysctl_ctx(ha->pci_dev); 1550 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1551 1552 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1553 CTLFLAG_RD, NULL, "spstat"); 1554 children = SYSCTL_CHILDREN(ctx_oid); 1555 1556 SYSCTL_ADD_QUAD(ctx, children, 1557 OID_AUTO, "sp_interrupts", 1558 CTLFLAG_RD, &ha->sp_interrupts, 1559 "No. 
of slowpath interrupts"); 1560 1561 return; 1562 } 1563 1564 static void 1565 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1566 { 1567 struct sysctl_ctx_list *ctx; 1568 struct sysctl_oid_list *children; 1569 struct sysctl_oid_list *node_children; 1570 struct sysctl_oid *ctx_oid; 1571 int i, j; 1572 uint8_t name_str[16]; 1573 1574 ctx = device_get_sysctl_ctx(ha->pci_dev); 1575 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1576 1577 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1578 CTLFLAG_RD, NULL, "fpstat"); 1579 children = SYSCTL_CHILDREN(ctx_oid); 1580 1581 for (i = 0; i < ha->num_rss; i++) { 1582 1583 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1584 snprintf(name_str, sizeof(name_str), "%d", i); 1585 1586 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1587 CTLFLAG_RD, NULL, name_str); 1588 node_children = SYSCTL_CHILDREN(ctx_oid); 1589 1590 /* Tx Related */ 1591 1592 SYSCTL_ADD_QUAD(ctx, node_children, 1593 OID_AUTO, "tx_pkts_processed", 1594 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1595 "No. of packets processed for transmission"); 1596 1597 SYSCTL_ADD_QUAD(ctx, node_children, 1598 OID_AUTO, "tx_pkts_freed", 1599 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1600 "No. of freed packets"); 1601 1602 SYSCTL_ADD_QUAD(ctx, node_children, 1603 OID_AUTO, "tx_pkts_transmitted", 1604 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1605 "No. of transmitted packets"); 1606 1607 SYSCTL_ADD_QUAD(ctx, node_children, 1608 OID_AUTO, "tx_pkts_completed", 1609 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1610 "No. of transmit completions"); 1611 1612 SYSCTL_ADD_QUAD(ctx, node_children, 1613 OID_AUTO, "tx_non_tso_pkts", 1614 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1615 "No. of non LSO transmited packets"); 1616 1617 #ifdef QLNX_TRACE_PERF_DATA 1618 1619 SYSCTL_ADD_QUAD(ctx, node_children, 1620 OID_AUTO, "tx_pkts_trans_ctx", 1621 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1622 "No. of transmitted packets in transmit context"); 1623 1624 SYSCTL_ADD_QUAD(ctx, node_children, 1625 OID_AUTO, "tx_pkts_compl_ctx", 1626 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1627 "No. of transmit completions in transmit context"); 1628 1629 SYSCTL_ADD_QUAD(ctx, node_children, 1630 OID_AUTO, "tx_pkts_trans_fp", 1631 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1632 "No. of transmitted packets in taskqueue"); 1633 1634 SYSCTL_ADD_QUAD(ctx, node_children, 1635 OID_AUTO, "tx_pkts_compl_fp", 1636 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1637 "No. of transmit completions in taskqueue"); 1638 1639 SYSCTL_ADD_QUAD(ctx, node_children, 1640 OID_AUTO, "tx_pkts_compl_intr", 1641 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1642 "No. of transmit completions in interrupt ctx"); 1643 #endif 1644 1645 SYSCTL_ADD_QUAD(ctx, node_children, 1646 OID_AUTO, "tx_tso_pkts", 1647 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1648 "No. 
of LSO transmitted packets"); 1649 1650 SYSCTL_ADD_QUAD(ctx, node_children, 1651 OID_AUTO, "tx_lso_wnd_min_len", 1652 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1653 "tx_lso_wnd_min_len"); 1654 1655 SYSCTL_ADD_QUAD(ctx, node_children, 1656 OID_AUTO, "tx_defrag", 1657 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1658 "tx_defrag"); 1659 1660 SYSCTL_ADD_QUAD(ctx, node_children, 1661 OID_AUTO, "tx_nsegs_gt_elem_left", 1662 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1663 "tx_nsegs_gt_elem_left"); 1664 1665 SYSCTL_ADD_UINT(ctx, node_children, 1666 OID_AUTO, "tx_tso_max_nsegs", 1667 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1668 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1669 1670 SYSCTL_ADD_UINT(ctx, node_children, 1671 OID_AUTO, "tx_tso_min_nsegs", 1672 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1673 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1674 1675 SYSCTL_ADD_UINT(ctx, node_children, 1676 OID_AUTO, "tx_tso_max_pkt_len", 1677 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1678 ha->fp_array[i].tx_tso_max_pkt_len, 1679 "tx_tso_max_pkt_len"); 1680 1681 SYSCTL_ADD_UINT(ctx, node_children, 1682 OID_AUTO, "tx_tso_min_pkt_len", 1683 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1684 ha->fp_array[i].tx_tso_min_pkt_len, 1685 "tx_tso_min_pkt_len"); 1686 1687 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1688 1689 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1690 snprintf(name_str, sizeof(name_str), 1691 "tx_pkts_nseg_%02d", (j+1)); 1692 1693 SYSCTL_ADD_QUAD(ctx, node_children, 1694 OID_AUTO, name_str, CTLFLAG_RD, 1695 &ha->fp_array[i].tx_pkts[j], name_str); 1696 } 1697 1698 #ifdef QLNX_TRACE_PERF_DATA 1699 for (j = 0; j < 18; j++) { 1700 1701 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1702 snprintf(name_str, sizeof(name_str), 1703 "tx_pkts_hist_%02d", (j+1)); 1704 1705 SYSCTL_ADD_QUAD(ctx, node_children, 1706 OID_AUTO, name_str, CTLFLAG_RD, 1707 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1708 } 1709 for (j = 0; j < 5; j++) { 1710 1711 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1712 snprintf(name_str, sizeof(name_str), 1713 "tx_comInt_%02d", (j+1)); 1714 1715 SYSCTL_ADD_QUAD(ctx, node_children, 1716 OID_AUTO, name_str, CTLFLAG_RD, 1717 &ha->fp_array[i].tx_comInt[j], name_str); 1718 } 1719 for (j = 0; j < 18; j++) { 1720 1721 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1722 snprintf(name_str, sizeof(name_str), 1723 "tx_pkts_q_%02d", (j+1)); 1724 1725 SYSCTL_ADD_QUAD(ctx, node_children, 1726 OID_AUTO, name_str, CTLFLAG_RD, 1727 &ha->fp_array[i].tx_pkts_q[j], name_str); 1728 } 1729 #endif 1730 1731 SYSCTL_ADD_QUAD(ctx, node_children, 1732 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1733 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1734 "err_tx_nsegs_gt_elem_left"); 1735 1736 SYSCTL_ADD_QUAD(ctx, node_children, 1737 OID_AUTO, "err_tx_dmamap_create", 1738 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1739 "err_tx_dmamap_create"); 1740 1741 SYSCTL_ADD_QUAD(ctx, node_children, 1742 OID_AUTO, "err_tx_defrag_dmamap_load", 1743 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1744 "err_tx_defrag_dmamap_load"); 1745 1746 SYSCTL_ADD_QUAD(ctx, node_children, 1747 OID_AUTO, "err_tx_non_tso_max_seg", 1748 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1749 "err_tx_non_tso_max_seg"); 1750 1751 SYSCTL_ADD_QUAD(ctx, node_children, 1752 OID_AUTO, "err_tx_dmamap_load", 1753 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1754 "err_tx_dmamap_load"); 1755 1756 SYSCTL_ADD_QUAD(ctx, node_children, 1757 OID_AUTO, 
"err_tx_defrag", 1758 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1759 "err_tx_defrag"); 1760 1761 SYSCTL_ADD_QUAD(ctx, node_children, 1762 OID_AUTO, "err_tx_free_pkt_null", 1763 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1764 "err_tx_free_pkt_null"); 1765 1766 SYSCTL_ADD_QUAD(ctx, node_children, 1767 OID_AUTO, "err_tx_cons_idx_conflict", 1768 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1769 "err_tx_cons_idx_conflict"); 1770 1771 SYSCTL_ADD_QUAD(ctx, node_children, 1772 OID_AUTO, "lro_cnt_64", 1773 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1774 "lro_cnt_64"); 1775 1776 SYSCTL_ADD_QUAD(ctx, node_children, 1777 OID_AUTO, "lro_cnt_128", 1778 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1779 "lro_cnt_128"); 1780 1781 SYSCTL_ADD_QUAD(ctx, node_children, 1782 OID_AUTO, "lro_cnt_256", 1783 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1784 "lro_cnt_256"); 1785 1786 SYSCTL_ADD_QUAD(ctx, node_children, 1787 OID_AUTO, "lro_cnt_512", 1788 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1789 "lro_cnt_512"); 1790 1791 SYSCTL_ADD_QUAD(ctx, node_children, 1792 OID_AUTO, "lro_cnt_1024", 1793 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1794 "lro_cnt_1024"); 1795 1796 /* Rx Related */ 1797 1798 SYSCTL_ADD_QUAD(ctx, node_children, 1799 OID_AUTO, "rx_pkts", 1800 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1801 "No. of received packets"); 1802 1803 SYSCTL_ADD_QUAD(ctx, node_children, 1804 OID_AUTO, "tpa_start", 1805 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1806 "No. of tpa_start packets"); 1807 1808 SYSCTL_ADD_QUAD(ctx, node_children, 1809 OID_AUTO, "tpa_cont", 1810 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1811 "No. of tpa_cont packets"); 1812 1813 SYSCTL_ADD_QUAD(ctx, node_children, 1814 OID_AUTO, "tpa_end", 1815 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1816 "No. of tpa_end packets"); 1817 1818 SYSCTL_ADD_QUAD(ctx, node_children, 1819 OID_AUTO, "err_m_getcl", 1820 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1821 "err_m_getcl"); 1822 1823 SYSCTL_ADD_QUAD(ctx, node_children, 1824 OID_AUTO, "err_m_getjcl", 1825 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1826 "err_m_getjcl"); 1827 1828 SYSCTL_ADD_QUAD(ctx, node_children, 1829 OID_AUTO, "err_rx_hw_errors", 1830 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1831 "err_rx_hw_errors"); 1832 1833 SYSCTL_ADD_QUAD(ctx, node_children, 1834 OID_AUTO, "err_rx_alloc_errors", 1835 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1836 "err_rx_alloc_errors"); 1837 } 1838 1839 return; 1840 } 1841 1842 static void 1843 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1844 { 1845 struct sysctl_ctx_list *ctx; 1846 struct sysctl_oid_list *children; 1847 struct sysctl_oid *ctx_oid; 1848 1849 ctx = device_get_sysctl_ctx(ha->pci_dev); 1850 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1851 1852 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1853 CTLFLAG_RD, NULL, "hwstat"); 1854 children = SYSCTL_CHILDREN(ctx_oid); 1855 1856 SYSCTL_ADD_QUAD(ctx, children, 1857 OID_AUTO, "no_buff_discards", 1858 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1859 "No. of packets discarded due to lack of buffer"); 1860 1861 SYSCTL_ADD_QUAD(ctx, children, 1862 OID_AUTO, "packet_too_big_discard", 1863 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1864 "No. 
of packets discarded because packet was too big"); 1865 1866 SYSCTL_ADD_QUAD(ctx, children, 1867 OID_AUTO, "ttl0_discard", 1868 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1869 "ttl0_discard"); 1870 1871 SYSCTL_ADD_QUAD(ctx, children, 1872 OID_AUTO, "rx_ucast_bytes", 1873 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1874 "rx_ucast_bytes"); 1875 1876 SYSCTL_ADD_QUAD(ctx, children, 1877 OID_AUTO, "rx_mcast_bytes", 1878 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1879 "rx_mcast_bytes"); 1880 1881 SYSCTL_ADD_QUAD(ctx, children, 1882 OID_AUTO, "rx_bcast_bytes", 1883 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1884 "rx_bcast_bytes"); 1885 1886 SYSCTL_ADD_QUAD(ctx, children, 1887 OID_AUTO, "rx_ucast_pkts", 1888 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1889 "rx_ucast_pkts"); 1890 1891 SYSCTL_ADD_QUAD(ctx, children, 1892 OID_AUTO, "rx_mcast_pkts", 1893 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1894 "rx_mcast_pkts"); 1895 1896 SYSCTL_ADD_QUAD(ctx, children, 1897 OID_AUTO, "rx_bcast_pkts", 1898 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1899 "rx_bcast_pkts"); 1900 1901 SYSCTL_ADD_QUAD(ctx, children, 1902 OID_AUTO, "mftag_filter_discards", 1903 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1904 "mftag_filter_discards"); 1905 1906 SYSCTL_ADD_QUAD(ctx, children, 1907 OID_AUTO, "mac_filter_discards", 1908 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1909 "mac_filter_discards"); 1910 1911 SYSCTL_ADD_QUAD(ctx, children, 1912 OID_AUTO, "tx_ucast_bytes", 1913 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1914 "tx_ucast_bytes"); 1915 1916 SYSCTL_ADD_QUAD(ctx, children, 1917 OID_AUTO, "tx_mcast_bytes", 1918 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1919 "tx_mcast_bytes"); 1920 1921 SYSCTL_ADD_QUAD(ctx, children, 1922 OID_AUTO, "tx_bcast_bytes", 1923 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1924 "tx_bcast_bytes"); 1925 1926 SYSCTL_ADD_QUAD(ctx, children, 1927 OID_AUTO, "tx_ucast_pkts", 1928 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1929 "tx_ucast_pkts"); 1930 1931 SYSCTL_ADD_QUAD(ctx, children, 1932 OID_AUTO, "tx_mcast_pkts", 1933 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1934 "tx_mcast_pkts"); 1935 1936 SYSCTL_ADD_QUAD(ctx, children, 1937 OID_AUTO, "tx_bcast_pkts", 1938 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1939 "tx_bcast_pkts"); 1940 1941 SYSCTL_ADD_QUAD(ctx, children, 1942 OID_AUTO, "tx_err_drop_pkts", 1943 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1944 "tx_err_drop_pkts"); 1945 1946 SYSCTL_ADD_QUAD(ctx, children, 1947 OID_AUTO, "tpa_coalesced_pkts", 1948 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1949 "tpa_coalesced_pkts"); 1950 1951 SYSCTL_ADD_QUAD(ctx, children, 1952 OID_AUTO, "tpa_coalesced_events", 1953 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1954 "tpa_coalesced_events"); 1955 1956 SYSCTL_ADD_QUAD(ctx, children, 1957 OID_AUTO, "tpa_aborts_num", 1958 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1959 "tpa_aborts_num"); 1960 1961 SYSCTL_ADD_QUAD(ctx, children, 1962 OID_AUTO, "tpa_not_coalesced_pkts", 1963 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1964 "tpa_not_coalesced_pkts"); 1965 1966 SYSCTL_ADD_QUAD(ctx, children, 1967 OID_AUTO, "tpa_coalesced_bytes", 1968 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1969 "tpa_coalesced_bytes"); 1970 1971 SYSCTL_ADD_QUAD(ctx, children, 1972 OID_AUTO, "rx_64_byte_packets", 1973 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1974 "rx_64_byte_packets"); 1975 1976 SYSCTL_ADD_QUAD(ctx, children, 1977 OID_AUTO, 
"rx_65_to_127_byte_packets", 1978 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1979 "rx_65_to_127_byte_packets"); 1980 1981 SYSCTL_ADD_QUAD(ctx, children, 1982 OID_AUTO, "rx_128_to_255_byte_packets", 1983 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1984 "rx_128_to_255_byte_packets"); 1985 1986 SYSCTL_ADD_QUAD(ctx, children, 1987 OID_AUTO, "rx_256_to_511_byte_packets", 1988 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1989 "rx_256_to_511_byte_packets"); 1990 1991 SYSCTL_ADD_QUAD(ctx, children, 1992 OID_AUTO, "rx_512_to_1023_byte_packets", 1993 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1994 "rx_512_to_1023_byte_packets"); 1995 1996 SYSCTL_ADD_QUAD(ctx, children, 1997 OID_AUTO, "rx_1024_to_1518_byte_packets", 1998 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1999 "rx_1024_to_1518_byte_packets"); 2000 2001 SYSCTL_ADD_QUAD(ctx, children, 2002 OID_AUTO, "rx_1519_to_1522_byte_packets", 2003 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 2004 "rx_1519_to_1522_byte_packets"); 2005 2006 SYSCTL_ADD_QUAD(ctx, children, 2007 OID_AUTO, "rx_1523_to_2047_byte_packets", 2008 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 2009 "rx_1523_to_2047_byte_packets"); 2010 2011 SYSCTL_ADD_QUAD(ctx, children, 2012 OID_AUTO, "rx_2048_to_4095_byte_packets", 2013 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 2014 "rx_2048_to_4095_byte_packets"); 2015 2016 SYSCTL_ADD_QUAD(ctx, children, 2017 OID_AUTO, "rx_4096_to_9216_byte_packets", 2018 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 2019 "rx_4096_to_9216_byte_packets"); 2020 2021 SYSCTL_ADD_QUAD(ctx, children, 2022 OID_AUTO, "rx_9217_to_16383_byte_packets", 2023 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 2024 "rx_9217_to_16383_byte_packets"); 2025 2026 SYSCTL_ADD_QUAD(ctx, children, 2027 OID_AUTO, "rx_crc_errors", 2028 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 2029 "rx_crc_errors"); 2030 2031 SYSCTL_ADD_QUAD(ctx, children, 2032 OID_AUTO, "rx_mac_crtl_frames", 2033 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 2034 "rx_mac_crtl_frames"); 2035 2036 SYSCTL_ADD_QUAD(ctx, children, 2037 OID_AUTO, "rx_pause_frames", 2038 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 2039 "rx_pause_frames"); 2040 2041 SYSCTL_ADD_QUAD(ctx, children, 2042 OID_AUTO, "rx_pfc_frames", 2043 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 2044 "rx_pfc_frames"); 2045 2046 SYSCTL_ADD_QUAD(ctx, children, 2047 OID_AUTO, "rx_align_errors", 2048 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 2049 "rx_align_errors"); 2050 2051 SYSCTL_ADD_QUAD(ctx, children, 2052 OID_AUTO, "rx_carrier_errors", 2053 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 2054 "rx_carrier_errors"); 2055 2056 SYSCTL_ADD_QUAD(ctx, children, 2057 OID_AUTO, "rx_oversize_packets", 2058 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2059 "rx_oversize_packets"); 2060 2061 SYSCTL_ADD_QUAD(ctx, children, 2062 OID_AUTO, "rx_jabbers", 2063 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2064 "rx_jabbers"); 2065 2066 SYSCTL_ADD_QUAD(ctx, children, 2067 OID_AUTO, "rx_undersize_packets", 2068 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2069 "rx_undersize_packets"); 2070 2071 SYSCTL_ADD_QUAD(ctx, children, 2072 OID_AUTO, "rx_fragments", 2073 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2074 "rx_fragments"); 2075 2076 SYSCTL_ADD_QUAD(ctx, children, 2077 OID_AUTO, "tx_64_byte_packets", 2078 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2079 
"tx_64_byte_packets"); 2080 2081 SYSCTL_ADD_QUAD(ctx, children, 2082 OID_AUTO, "tx_65_to_127_byte_packets", 2083 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2084 "tx_65_to_127_byte_packets"); 2085 2086 SYSCTL_ADD_QUAD(ctx, children, 2087 OID_AUTO, "tx_128_to_255_byte_packets", 2088 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2089 "tx_128_to_255_byte_packets"); 2090 2091 SYSCTL_ADD_QUAD(ctx, children, 2092 OID_AUTO, "tx_256_to_511_byte_packets", 2093 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2094 "tx_256_to_511_byte_packets"); 2095 2096 SYSCTL_ADD_QUAD(ctx, children, 2097 OID_AUTO, "tx_512_to_1023_byte_packets", 2098 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2099 "tx_512_to_1023_byte_packets"); 2100 2101 SYSCTL_ADD_QUAD(ctx, children, 2102 OID_AUTO, "tx_1024_to_1518_byte_packets", 2103 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2104 "tx_1024_to_1518_byte_packets"); 2105 2106 SYSCTL_ADD_QUAD(ctx, children, 2107 OID_AUTO, "tx_1519_to_2047_byte_packets", 2108 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2109 "tx_1519_to_2047_byte_packets"); 2110 2111 SYSCTL_ADD_QUAD(ctx, children, 2112 OID_AUTO, "tx_2048_to_4095_byte_packets", 2113 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2114 "tx_2048_to_4095_byte_packets"); 2115 2116 SYSCTL_ADD_QUAD(ctx, children, 2117 OID_AUTO, "tx_4096_to_9216_byte_packets", 2118 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2119 "tx_4096_to_9216_byte_packets"); 2120 2121 SYSCTL_ADD_QUAD(ctx, children, 2122 OID_AUTO, "tx_9217_to_16383_byte_packets", 2123 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2124 "tx_9217_to_16383_byte_packets"); 2125 2126 SYSCTL_ADD_QUAD(ctx, children, 2127 OID_AUTO, "tx_pause_frames", 2128 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2129 "tx_pause_frames"); 2130 2131 SYSCTL_ADD_QUAD(ctx, children, 2132 OID_AUTO, "tx_pfc_frames", 2133 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2134 "tx_pfc_frames"); 2135 2136 SYSCTL_ADD_QUAD(ctx, children, 2137 OID_AUTO, "tx_lpi_entry_count", 2138 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2139 "tx_lpi_entry_count"); 2140 2141 SYSCTL_ADD_QUAD(ctx, children, 2142 OID_AUTO, "tx_total_collisions", 2143 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2144 "tx_total_collisions"); 2145 2146 SYSCTL_ADD_QUAD(ctx, children, 2147 OID_AUTO, "brb_truncates", 2148 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2149 "brb_truncates"); 2150 2151 SYSCTL_ADD_QUAD(ctx, children, 2152 OID_AUTO, "brb_discards", 2153 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2154 "brb_discards"); 2155 2156 SYSCTL_ADD_QUAD(ctx, children, 2157 OID_AUTO, "rx_mac_bytes", 2158 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2159 "rx_mac_bytes"); 2160 2161 SYSCTL_ADD_QUAD(ctx, children, 2162 OID_AUTO, "rx_mac_uc_packets", 2163 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2164 "rx_mac_uc_packets"); 2165 2166 SYSCTL_ADD_QUAD(ctx, children, 2167 OID_AUTO, "rx_mac_mc_packets", 2168 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2169 "rx_mac_mc_packets"); 2170 2171 SYSCTL_ADD_QUAD(ctx, children, 2172 OID_AUTO, "rx_mac_bc_packets", 2173 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2174 "rx_mac_bc_packets"); 2175 2176 SYSCTL_ADD_QUAD(ctx, children, 2177 OID_AUTO, "rx_mac_frames_ok", 2178 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2179 "rx_mac_frames_ok"); 2180 2181 SYSCTL_ADD_QUAD(ctx, children, 2182 OID_AUTO, "tx_mac_bytes", 2183 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2184 "tx_mac_bytes"); 2185 2186 SYSCTL_ADD_QUAD(ctx, children, 2187 OID_AUTO, "tx_mac_uc_packets", 2188 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2189 "tx_mac_uc_packets"); 2190 2191 SYSCTL_ADD_QUAD(ctx, children, 2192 OID_AUTO, "tx_mac_mc_packets", 2193 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2194 "tx_mac_mc_packets"); 2195 2196 SYSCTL_ADD_QUAD(ctx, children, 2197 OID_AUTO, "tx_mac_bc_packets", 2198 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2199 "tx_mac_bc_packets"); 2200 2201 SYSCTL_ADD_QUAD(ctx, children, 2202 OID_AUTO, "tx_mac_ctrl_frames", 2203 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2204 "tx_mac_ctrl_frames"); 2205 return; 2206 } 2207 2208 static void 2209 qlnx_add_sysctls(qlnx_host_t *ha) 2210 { 2211 device_t dev = ha->pci_dev; 2212 struct sysctl_ctx_list *ctx; 2213 struct sysctl_oid_list *children; 2214 2215 ctx = device_get_sysctl_ctx(dev); 2216 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2217 2218 qlnx_add_fp_stats_sysctls(ha); 2219 qlnx_add_sp_stats_sysctls(ha); 2220 2221 if (qlnx_vf_device(ha) != 0) 2222 qlnx_add_hw_stats_sysctls(ha); 2223 2224 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2225 CTLFLAG_RD, qlnx_ver_str, 0, 2226 "Driver Version"); 2227 2228 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2229 CTLFLAG_RD, ha->stormfw_ver, 0, 2230 "STORM Firmware Version"); 2231 2232 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2233 CTLFLAG_RD, ha->mfw_ver, 0, 2234 "Management Firmware Version"); 2235 2236 SYSCTL_ADD_UINT(ctx, children, 2237 OID_AUTO, "personality", CTLFLAG_RD, 2238 &ha->personality, ha->personality, 2239 "\tpersonality = 0 => Ethernet Only\n" 2240 "\tpersonality = 3 => Ethernet and RoCE\n" 2241 "\tpersonality = 4 => Ethernet and iWARP\n" 2242 "\tpersonality = 6 => Default in Shared Memory\n"); 2243 2244 ha->dbg_level = 0; 2245 SYSCTL_ADD_UINT(ctx, children, 2246 OID_AUTO, "debug", CTLFLAG_RW, 2247 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2248 2249 ha->dp_level = 0x01; 2250 SYSCTL_ADD_UINT(ctx, children, 2251 OID_AUTO, "dp_level", CTLFLAG_RW, 2252 &ha->dp_level, ha->dp_level, "DP Level"); 2253 2254 ha->dbg_trace_lro_cnt = 0; 2255 SYSCTL_ADD_UINT(ctx, children, 2256 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2257 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2258 "Trace LRO Counts"); 2259 2260 ha->dbg_trace_tso_pkt_len = 0; 2261 SYSCTL_ADD_UINT(ctx, children, 2262 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2263 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2264 "Trace TSO packet lengths"); 2265 2266 ha->dp_module = 0; 2267 SYSCTL_ADD_UINT(ctx, children, 2268 OID_AUTO, "dp_module", CTLFLAG_RW, 2269 &ha->dp_module, ha->dp_module, "DP Module"); 2270 2271 ha->err_inject = 0; 2272 2273 SYSCTL_ADD_UINT(ctx, children, 2274 OID_AUTO, "err_inject", CTLFLAG_RW, 2275 &ha->err_inject, ha->err_inject, "Error Inject"); 2276 2277 ha->storm_stats_enable = 0; 2278 2279 SYSCTL_ADD_UINT(ctx, children, 2280 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2281 &ha->storm_stats_enable, ha->storm_stats_enable, 2282 "Enable Storm Statistics Gathering"); 2283 2284 ha->storm_stats_index = 0; 2285 2286 SYSCTL_ADD_UINT(ctx, children, 2287 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2288 &ha->storm_stats_index, ha->storm_stats_index, 2289 "Enable Storm Statistics Gathering Current Index"); 2290 2291 ha->grcdump_taken = 0; 2292 SYSCTL_ADD_UINT(ctx, children, 2293 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2294 &ha->grcdump_taken, ha->grcdump_taken, 
2295 "grcdump_taken"); 2296 2297 ha->idle_chk_taken = 0; 2298 SYSCTL_ADD_UINT(ctx, children, 2299 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2300 &ha->idle_chk_taken, ha->idle_chk_taken, 2301 "idle_chk_taken"); 2302 2303 SYSCTL_ADD_UINT(ctx, children, 2304 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2305 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2306 "rx_coalesce_usecs"); 2307 2308 SYSCTL_ADD_UINT(ctx, children, 2309 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2310 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2311 "tx_coalesce_usecs"); 2312 2313 SYSCTL_ADD_PROC(ctx, children, 2314 OID_AUTO, "trigger_dump", (CTLTYPE_INT | CTLFLAG_RW), 2315 (void *)ha, 0, 2316 qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2317 2318 SYSCTL_ADD_PROC(ctx, children, 2319 OID_AUTO, "set_rx_coalesce_usecs", 2320 (CTLTYPE_INT | CTLFLAG_RW), 2321 (void *)ha, 0, 2322 qlnx_set_rx_coalesce, "I", 2323 "rx interrupt coalesce period microseconds"); 2324 2325 SYSCTL_ADD_PROC(ctx, children, 2326 OID_AUTO, "set_tx_coalesce_usecs", 2327 (CTLTYPE_INT | CTLFLAG_RW), 2328 (void *)ha, 0, 2329 qlnx_set_tx_coalesce, "I", 2330 "tx interrupt coalesce period microseconds"); 2331 2332 ha->rx_pkt_threshold = 128; 2333 SYSCTL_ADD_UINT(ctx, children, 2334 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2335 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2336 "No. of Rx Pkts to process at a time"); 2337 2338 ha->rx_jumbo_buf_eq_mtu = 0; 2339 SYSCTL_ADD_UINT(ctx, children, 2340 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2341 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2342 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2343 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2344 2345 SYSCTL_ADD_QUAD(ctx, children, 2346 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2347 &ha->err_illegal_intr, "err_illegal_intr"); 2348 2349 SYSCTL_ADD_QUAD(ctx, children, 2350 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2351 &ha->err_fp_null, "err_fp_null"); 2352 2353 SYSCTL_ADD_QUAD(ctx, children, 2354 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2355 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2356 return; 2357 } 2358 2359 2360 2361 /***************************************************************************** 2362 * Operating System Network Interface Functions 2363 *****************************************************************************/ 2364 2365 static void 2366 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2367 { 2368 uint16_t device_id; 2369 struct ifnet *ifp; 2370 2371 ifp = ha->ifp = if_alloc(IFT_ETHER); 2372 2373 if (ifp == NULL) 2374 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2375 2376 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2377 2378 device_id = pci_get_device(ha->pci_dev); 2379 2380 #if __FreeBSD_version >= 1000000 2381 2382 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2383 ifp->if_baudrate = IF_Gbps(40); 2384 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2385 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2386 ifp->if_baudrate = IF_Gbps(25); 2387 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2388 ifp->if_baudrate = IF_Gbps(50); 2389 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2390 ifp->if_baudrate = IF_Gbps(100); 2391 2392 ifp->if_capabilities = IFCAP_LINKSTATE; 2393 #else 2394 ifp->if_mtu = ETHERMTU; 2395 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 2396 2397 #endif /* #if __FreeBSD_version >= 1000000 */ 2398 2399 ifp->if_init = qlnx_init; 2400 ifp->if_softc = ha; 2401 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2402 ifp->if_ioctl = qlnx_ioctl; 
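/*
 * The transmit side is hooked up through the multiqueue
 * if_transmit/if_qflush interface (no legacy if_start path):
 * qlnx_transmit() maps the mbuf flowid onto one of the fastpath
 * rings and each fastpath drains its own buf_ring under fp->tx_mtx.
 */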
2403 ifp->if_transmit = qlnx_transmit; 2404 ifp->if_qflush = qlnx_qflush; 2405 2406 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 2407 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 2408 IFQ_SET_READY(&ifp->if_snd); 2409 2410 #if __FreeBSD_version >= 1100036 2411 if_setgetcounterfn(ifp, qlnx_get_counter); 2412 #endif 2413 2414 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2415 2416 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2417 2418 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2419 !ha->primary_mac[2] && !ha->primary_mac[3] && 2420 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2421 uint32_t rnd; 2422 2423 rnd = arc4random(); 2424 2425 ha->primary_mac[0] = 0x00; 2426 ha->primary_mac[1] = 0x0e; 2427 ha->primary_mac[2] = 0x1e; 2428 ha->primary_mac[3] = rnd & 0xFF; 2429 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2430 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2431 } 2432 2433 ether_ifattach(ifp, ha->primary_mac); 2434 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2435 2436 ifp->if_capabilities = IFCAP_HWCSUM; 2437 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2438 2439 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2440 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 2441 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2442 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2443 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 2444 ifp->if_capabilities |= IFCAP_TSO4; 2445 ifp->if_capabilities |= IFCAP_TSO6; 2446 ifp->if_capabilities |= IFCAP_LRO; 2447 2448 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE - 2449 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2450 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */; 2451 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE; 2452 2453 2454 ifp->if_capenable = ifp->if_capabilities; 2455 2456 ifp->if_hwassist = CSUM_IP; 2457 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 2458 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 2459 ifp->if_hwassist |= CSUM_TSO; 2460 2461 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2462 2463 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2464 qlnx_media_status); 2465 2466 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2467 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2468 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2469 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2470 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2471 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2472 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2473 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2474 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2475 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2476 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2477 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2478 ifmedia_add(&ha->media, 2479 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2480 ifmedia_add(&ha->media, 2481 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2482 ifmedia_add(&ha->media, 2483 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2484 } 2485 2486 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2487 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2488 2489 2490 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2491 2492 QL_DPRINT2(ha, "exit\n"); 2493 2494 return; 2495 } 2496 2497 static void 2498 qlnx_init_locked(qlnx_host_t *ha) 2499 { 2500 struct ifnet *ifp = ha->ifp; 2501 2502 QL_DPRINT1(ha, "Driver Initialization start \n"); 2503 2504 qlnx_stop(ha); 2505 
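/*
 * (Re)initialization is a full teardown followed by a fresh load:
 * qlnx_stop() above undoes any previous state, and only if qlnx_load()
 * succeeds below are IFF_DRV_RUNNING set and IFF_DRV_OACTIVE cleared
 * (and, when iWARP support is compiled in, the RDMA device opened).
 */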
2506 if (qlnx_load(ha) == 0) { 2507 2508 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2509 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2510 2511 #ifdef QLNX_ENABLE_IWARP 2512 if (qlnx_vf_device(ha) != 0) { 2513 qlnx_rdma_dev_open(ha); 2514 } 2515 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2516 } 2517 2518 return; 2519 } 2520 2521 static void 2522 qlnx_init(void *arg) 2523 { 2524 qlnx_host_t *ha; 2525 2526 ha = (qlnx_host_t *)arg; 2527 2528 QL_DPRINT2(ha, "enter\n"); 2529 2530 QLNX_LOCK(ha); 2531 qlnx_init_locked(ha); 2532 QLNX_UNLOCK(ha); 2533 2534 QL_DPRINT2(ha, "exit\n"); 2535 2536 return; 2537 } 2538 2539 static int 2540 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2541 { 2542 struct ecore_filter_mcast *mcast; 2543 struct ecore_dev *cdev; 2544 int rc; 2545 2546 cdev = &ha->cdev; 2547 2548 mcast = &ha->ecore_mcast; 2549 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2550 2551 if (add_mac) 2552 mcast->opcode = ECORE_FILTER_ADD; 2553 else 2554 mcast->opcode = ECORE_FILTER_REMOVE; 2555 2556 mcast->num_mc_addrs = 1; 2557 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2558 2559 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2560 2561 return (rc); 2562 } 2563 2564 static int 2565 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2566 { 2567 int i; 2568 2569 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2570 2571 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2572 return 0; /* it has already been added */ 2573 } 2574 2575 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2576 2577 if ((ha->mcast[i].addr[0] == 0) && 2578 (ha->mcast[i].addr[1] == 0) && 2579 (ha->mcast[i].addr[2] == 0) && 2580 (ha->mcast[i].addr[3] == 0) && 2581 (ha->mcast[i].addr[4] == 0) && 2582 (ha->mcast[i].addr[5] == 0)) { 2583 2584 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2585 return (-1); 2586 2587 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2588 ha->nmcast++; 2589 2590 return 0; 2591 } 2592 } 2593 return 0; 2594 } 2595 2596 static int 2597 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2598 { 2599 int i; 2600 2601 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2602 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2603 2604 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2605 return (-1); 2606 2607 ha->mcast[i].addr[0] = 0; 2608 ha->mcast[i].addr[1] = 0; 2609 ha->mcast[i].addr[2] = 0; 2610 ha->mcast[i].addr[3] = 0; 2611 ha->mcast[i].addr[4] = 0; 2612 ha->mcast[i].addr[5] = 0; 2613 2614 ha->nmcast--; 2615 2616 return 0; 2617 } 2618 } 2619 return 0; 2620 } 2621 2622 /* 2623 * Name: qlnx_hw_set_multi 2624 * Function: Sets the Multicast Addresses provided by the host O.S. into the 2625 * hardware (for the given interface) 2626 */ 2627 static void 2628 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2629 uint32_t add_mac) 2630 { 2631 int i; 2632 2633 for (i = 0; i < mcnt; i++) { 2634 if (add_mac) { 2635 if (qlnx_hw_add_mcast(ha, mta)) 2636 break; 2637 } else { 2638 if (qlnx_hw_del_mcast(ha, mta)) 2639 break; 2640 } 2641 2642 mta += ETHER_HDR_LEN; 2643 } 2644 return; 2645 } 2646 2647 2648 #define QLNX_MCAST_ADDRS_SIZE (QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN) 2649 static int 2650 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2651 { 2652 uint8_t mta[QLNX_MCAST_ADDRS_SIZE]; 2653 struct ifmultiaddr *ifma; 2654 int mcnt = 0; 2655 struct ifnet *ifp = ha->ifp; 2656 int ret = 0; 2657 2658 if (qlnx_vf_device(ha) == 0) 2659 return (0); 2660 2661 if_maddr_rlock(ifp); 2662 2663 CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2664 2665 if (ifma->ifma_addr->sa_family !=
AF_LINK) 2666 continue; 2667 2668 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2669 break; 2670 2671 bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), 2672 &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2673 2674 mcnt++; 2675 } 2676 2677 if_maddr_runlock(ifp); 2678 2679 QLNX_LOCK(ha); 2680 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2681 QLNX_UNLOCK(ha); 2682 2683 return (ret); 2684 } 2685 2686 static int 2687 qlnx_set_promisc(qlnx_host_t *ha) 2688 { 2689 int rc = 0; 2690 uint8_t filter; 2691 2692 if (qlnx_vf_device(ha) == 0) 2693 return (0); 2694 2695 filter = ha->filter; 2696 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2697 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2698 2699 rc = qlnx_set_rx_accept_filter(ha, filter); 2700 return (rc); 2701 } 2702 2703 static int 2704 qlnx_set_allmulti(qlnx_host_t *ha) 2705 { 2706 int rc = 0; 2707 uint8_t filter; 2708 2709 if (qlnx_vf_device(ha) == 0) 2710 return (0); 2711 2712 filter = ha->filter; 2713 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2714 rc = qlnx_set_rx_accept_filter(ha, filter); 2715 2716 return (rc); 2717 } 2718 2719 2720 static int 2721 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2722 { 2723 int ret = 0, mask; 2724 struct ifreq *ifr = (struct ifreq *)data; 2725 struct ifaddr *ifa = (struct ifaddr *)data; 2726 qlnx_host_t *ha; 2727 2728 ha = (qlnx_host_t *)ifp->if_softc; 2729 2730 switch (cmd) { 2731 case SIOCSIFADDR: 2732 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2733 2734 if (ifa->ifa_addr->sa_family == AF_INET) { 2735 ifp->if_flags |= IFF_UP; 2736 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2737 QLNX_LOCK(ha); 2738 qlnx_init_locked(ha); 2739 QLNX_UNLOCK(ha); 2740 } 2741 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2742 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2743 2744 arp_ifinit(ifp, ifa); 2745 } else { 2746 ether_ioctl(ifp, cmd, data); 2747 } 2748 break; 2749 2750 case SIOCSIFMTU: 2751 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2752 2753 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2754 ret = EINVAL; 2755 } else { 2756 QLNX_LOCK(ha); 2757 ifp->if_mtu = ifr->ifr_mtu; 2758 ha->max_frame_size = 2759 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2760 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2761 qlnx_init_locked(ha); 2762 } 2763 2764 QLNX_UNLOCK(ha); 2765 } 2766 2767 break; 2768 2769 case SIOCSIFFLAGS: 2770 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2771 2772 QLNX_LOCK(ha); 2773 2774 if (ifp->if_flags & IFF_UP) { 2775 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2776 if ((ifp->if_flags ^ ha->if_flags) & 2777 IFF_PROMISC) { 2778 ret = qlnx_set_promisc(ha); 2779 } else if ((ifp->if_flags ^ ha->if_flags) & 2780 IFF_ALLMULTI) { 2781 ret = qlnx_set_allmulti(ha); 2782 } 2783 } else { 2784 ha->max_frame_size = ifp->if_mtu + 2785 ETHER_HDR_LEN + ETHER_CRC_LEN; 2786 qlnx_init_locked(ha); 2787 } 2788 } else { 2789 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2790 qlnx_stop(ha); 2791 ha->if_flags = ifp->if_flags; 2792 } 2793 2794 QLNX_UNLOCK(ha); 2795 break; 2796 2797 case SIOCADDMULTI: 2798 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2799 2800 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2801 if (qlnx_set_multi(ha, 1)) 2802 ret = EINVAL; 2803 } 2804 break; 2805 2806 case SIOCDELMULTI: 2807 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2808 2809 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2810 if (qlnx_set_multi(ha, 0)) 2811 ret = EINVAL; 2812 } 2813 break; 2814 2815 case SIOCSIFMEDIA: 2816 case SIOCGIFMEDIA: 2817 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2818 2819 ret = ifmedia_ioctl(ifp, ifr, &ha->media, 
cmd); 2820 break; 2821 2822 case SIOCSIFCAP: 2823 2824 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2825 2826 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2827 2828 if (mask & IFCAP_HWCSUM) 2829 ifp->if_capenable ^= IFCAP_HWCSUM; 2830 if (mask & IFCAP_TSO4) 2831 ifp->if_capenable ^= IFCAP_TSO4; 2832 if (mask & IFCAP_TSO6) 2833 ifp->if_capenable ^= IFCAP_TSO6; 2834 if (mask & IFCAP_VLAN_HWTAGGING) 2835 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2836 if (mask & IFCAP_VLAN_HWTSO) 2837 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2838 if (mask & IFCAP_LRO) 2839 ifp->if_capenable ^= IFCAP_LRO; 2840 2841 QLNX_LOCK(ha); 2842 2843 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2844 qlnx_init_locked(ha); 2845 2846 QLNX_UNLOCK(ha); 2847 2848 VLAN_CAPABILITIES(ifp); 2849 break; 2850 2851 #if (__FreeBSD_version >= 1100101) 2852 2853 case SIOCGI2C: 2854 { 2855 struct ifi2creq i2c; 2856 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2857 struct ecore_ptt *p_ptt; 2858 2859 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2860 2861 if (ret) 2862 break; 2863 2864 if ((i2c.len > sizeof (i2c.data)) || 2865 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2866 ret = EINVAL; 2867 break; 2868 } 2869 2870 p_ptt = ecore_ptt_acquire(p_hwfn); 2871 2872 if (!p_ptt) { 2873 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2874 ret = -1; 2875 break; 2876 } 2877 2878 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2879 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2880 i2c.len, &i2c.data[0]); 2881 2882 ecore_ptt_release(p_hwfn, p_ptt); 2883 2884 if (ret) { 2885 ret = -1; 2886 break; 2887 } 2888 2889 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2890 2891 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2892 len = %d addr = 0x%02x offset = 0x%04x \ 2893 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2894 0x%02x 0x%02x 0x%02x\n", 2895 ret, i2c.len, i2c.dev_addr, i2c.offset, 2896 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2897 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2898 break; 2899 } 2900 #endif /* #if (__FreeBSD_version >= 1100101) */ 2901 2902 default: 2903 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2904 ret = ether_ioctl(ifp, cmd, data); 2905 break; 2906 } 2907 2908 return (ret); 2909 } 2910 2911 static int 2912 qlnx_media_change(struct ifnet *ifp) 2913 { 2914 qlnx_host_t *ha; 2915 struct ifmedia *ifm; 2916 int ret = 0; 2917 2918 ha = (qlnx_host_t *)ifp->if_softc; 2919 2920 QL_DPRINT2(ha, "enter\n"); 2921 2922 ifm = &ha->media; 2923 2924 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2925 ret = EINVAL; 2926 2927 QL_DPRINT2(ha, "exit\n"); 2928 2929 return (ret); 2930 } 2931 2932 static void 2933 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2934 { 2935 qlnx_host_t *ha; 2936 2937 ha = (qlnx_host_t *)ifp->if_softc; 2938 2939 QL_DPRINT2(ha, "enter\n"); 2940 2941 ifmr->ifm_status = IFM_AVALID; 2942 ifmr->ifm_active = IFM_ETHER; 2943 2944 if (ha->link_up) { 2945 ifmr->ifm_status |= IFM_ACTIVE; 2946 ifmr->ifm_active |= 2947 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2948 2949 if (ha->if_link.link_partner_caps & 2950 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2951 ifmr->ifm_active |= 2952 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2953 } 2954 2955 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2956 2957 return; 2958 } 2959 2960 2961 static void 2962 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2963 struct qlnx_tx_queue *txq) 2964 { 2965 u16 idx; 2966 struct mbuf *mp; 2967 bus_dmamap_t map; 2968 int i; 2969 struct eth_tx_bd *tx_data_bd; 2970 struct eth_tx_1st_bd *first_bd; 2971 int nbds = 0; 2972 2973 idx = txq->sw_tx_cons; 2974 mp = txq->sw_tx_ring[idx].mp; 2975 map = txq->sw_tx_ring[idx].map; 2976 2977 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2978 2979 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2980 2981 QL_DPRINT1(ha, "(mp == NULL) " 2982 " tx_idx = 0x%x" 2983 " ecore_prod_idx = 0x%x" 2984 " ecore_cons_idx = 0x%x" 2985 " hw_bd_cons = 0x%x" 2986 " txq_db_last = 0x%x" 2987 " elem_left = 0x%x\n", 2988 fp->rss_id, 2989 ecore_chain_get_prod_idx(&txq->tx_pbl), 2990 ecore_chain_get_cons_idx(&txq->tx_pbl), 2991 le16toh(*txq->hw_cons_ptr), 2992 txq->tx_db.raw, 2993 ecore_chain_get_elem_left(&txq->tx_pbl)); 2994 2995 fp->err_tx_free_pkt_null++; 2996 2997 //DEBUG 2998 qlnx_trigger_dump(ha); 2999 3000 return; 3001 } else { 3002 3003 QLNX_INC_OPACKETS((ha->ifp)); 3004 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 3005 3006 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 3007 bus_dmamap_unload(ha->tx_tag, map); 3008 3009 fp->tx_pkts_freed++; 3010 fp->tx_pkts_completed++; 3011 3012 m_freem(mp); 3013 } 3014 3015 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 3016 nbds = first_bd->data.nbds; 3017 3018 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 3019 3020 for (i = 1; i < nbds; i++) { 3021 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 3022 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 3023 } 3024 txq->sw_tx_ring[idx].flags = 0; 3025 txq->sw_tx_ring[idx].mp = NULL; 3026 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 3027 3028 return; 3029 } 3030 3031 static void 3032 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3033 struct qlnx_tx_queue *txq) 3034 { 3035 u16 hw_bd_cons; 3036 u16 ecore_cons_idx; 3037 uint16_t diff; 3038 uint16_t idx, idx2; 3039 3040 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 3041 3042 while (hw_bd_cons != 3043 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 3044 3045 if (hw_bd_cons < ecore_cons_idx) { 3046 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 3047 } else { 3048 diff = hw_bd_cons - ecore_cons_idx; 3049 } 3050 if ((diff > TX_RING_SIZE) || 3051 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 3052 3053 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 3054 3055 QL_DPRINT1(ha, "(diff = 0x%x) " 3056 " tx_idx = 0x%x" 3057 " ecore_prod_idx = 0x%x" 3058 " ecore_cons_idx = 0x%x" 3059 " hw_bd_cons = 0x%x" 3060 " txq_db_last = 0x%x" 3061 " elem_left = 0x%x\n", 3062 diff, 3063 fp->rss_id, 3064 ecore_chain_get_prod_idx(&txq->tx_pbl), 3065 ecore_chain_get_cons_idx(&txq->tx_pbl), 3066 le16toh(*txq->hw_cons_ptr), 3067 txq->tx_db.raw, 3068 ecore_chain_get_elem_left(&txq->tx_pbl)); 3069 3070 fp->err_tx_cons_idx_conflict++; 3071 3072 //DEBUG 3073 qlnx_trigger_dump(ha); 3074 } 3075 3076 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3077 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 3078 prefetch(txq->sw_tx_ring[idx].mp); 3079 prefetch(txq->sw_tx_ring[idx2].mp); 3080 3081 qlnx_free_tx_pkt(ha, fp, txq); 3082 3083 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3084 } 3085 return; 3086 } 3087 3088 static int 3089 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp) 3090 { 3091 int ret = 0; 3092 struct 
qlnx_tx_queue *txq; 3093 qlnx_host_t * ha; 3094 uint16_t elem_left; 3095 3096 txq = fp->txq[0]; 3097 ha = (qlnx_host_t *)fp->edev; 3098 3099 3100 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3101 if(mp != NULL) 3102 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3103 return (ret); 3104 } 3105 3106 if(mp != NULL) 3107 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3108 3109 mp = drbr_peek(ifp, fp->tx_br); 3110 3111 while (mp != NULL) { 3112 3113 if (qlnx_send(ha, fp, &mp)) { 3114 3115 if (mp != NULL) { 3116 drbr_putback(ifp, fp->tx_br, mp); 3117 } else { 3118 fp->tx_pkts_processed++; 3119 drbr_advance(ifp, fp->tx_br); 3120 } 3121 goto qlnx_transmit_locked_exit; 3122 3123 } else { 3124 drbr_advance(ifp, fp->tx_br); 3125 fp->tx_pkts_transmitted++; 3126 fp->tx_pkts_processed++; 3127 } 3128 3129 mp = drbr_peek(ifp, fp->tx_br); 3130 } 3131 3132 qlnx_transmit_locked_exit: 3133 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3134 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3135 < QLNX_TX_ELEM_MAX_THRESH)) 3136 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3137 3138 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3139 return ret; 3140 } 3141 3142 3143 static int 3144 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 3145 { 3146 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 3147 struct qlnx_fastpath *fp; 3148 int rss_id = 0, ret = 0; 3149 3150 #ifdef QLNX_TRACEPERF_DATA 3151 uint64_t tx_pkts = 0, tx_compl = 0; 3152 #endif 3153 3154 QL_DPRINT2(ha, "enter\n"); 3155 3156 #if __FreeBSD_version >= 1100000 3157 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3158 #else 3159 if (mp->m_flags & M_FLOWID) 3160 #endif 3161 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3162 ha->num_rss; 3163 3164 fp = &ha->fp_array[rss_id]; 3165 3166 if (fp->tx_br == NULL) { 3167 ret = EINVAL; 3168 goto qlnx_transmit_exit; 3169 } 3170 3171 if (mtx_trylock(&fp->tx_mtx)) { 3172 3173 #ifdef QLNX_TRACEPERF_DATA 3174 tx_pkts = fp->tx_pkts_transmitted; 3175 tx_compl = fp->tx_pkts_completed; 3176 #endif 3177 3178 ret = qlnx_transmit_locked(ifp, fp, mp); 3179 3180 #ifdef QLNX_TRACEPERF_DATA 3181 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3182 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3183 #endif 3184 mtx_unlock(&fp->tx_mtx); 3185 } else { 3186 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3187 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3188 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3189 } 3190 } 3191 3192 qlnx_transmit_exit: 3193 3194 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3195 return ret; 3196 } 3197 3198 static void 3199 qlnx_qflush(struct ifnet *ifp) 3200 { 3201 int rss_id; 3202 struct qlnx_fastpath *fp; 3203 struct mbuf *mp; 3204 qlnx_host_t *ha; 3205 3206 ha = (qlnx_host_t *)ifp->if_softc; 3207 3208 QL_DPRINT2(ha, "enter\n"); 3209 3210 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3211 3212 fp = &ha->fp_array[rss_id]; 3213 3214 if (fp == NULL) 3215 continue; 3216 3217 if (fp->tx_br) { 3218 mtx_lock(&fp->tx_mtx); 3219 3220 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3221 fp->tx_pkts_freed++; 3222 m_freem(mp); 3223 } 3224 mtx_unlock(&fp->tx_mtx); 3225 } 3226 } 3227 QL_DPRINT2(ha, "exit\n"); 3228 3229 return; 3230 } 3231 3232 static void 3233 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3234 { 3235 struct ecore_dev *cdev; 3236 uint32_t offset; 3237 3238 cdev = &ha->cdev; 3239 3240 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3241 3242 bus_write_4(ha->pci_dbells, offset, 
value); 3243 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3244 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3245 3246 return; 3247 } 3248 3249 static uint32_t 3250 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3251 { 3252 struct ether_vlan_header *eh = NULL; 3253 struct ip *ip = NULL; 3254 struct ip6_hdr *ip6 = NULL; 3255 struct tcphdr *th = NULL; 3256 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3257 uint16_t etype = 0; 3258 device_t dev; 3259 uint8_t buf[sizeof(struct ip6_hdr)]; 3260 3261 dev = ha->pci_dev; 3262 3263 eh = mtod(mp, struct ether_vlan_header *); 3264 3265 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3266 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3267 etype = ntohs(eh->evl_proto); 3268 } else { 3269 ehdrlen = ETHER_HDR_LEN; 3270 etype = ntohs(eh->evl_encap_proto); 3271 } 3272 3273 switch (etype) { 3274 3275 case ETHERTYPE_IP: 3276 ip = (struct ip *)(mp->m_data + ehdrlen); 3277 3278 ip_hlen = sizeof (struct ip); 3279 3280 if (mp->m_len < (ehdrlen + ip_hlen)) { 3281 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3282 ip = (struct ip *)buf; 3283 } 3284 3285 th = (struct tcphdr *)(ip + 1); 3286 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3287 break; 3288 3289 case ETHERTYPE_IPV6: 3290 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3291 3292 ip_hlen = sizeof(struct ip6_hdr); 3293 3294 if (mp->m_len < (ehdrlen + ip_hlen)) { 3295 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3296 buf); 3297 ip6 = (struct ip6_hdr *)buf; 3298 } 3299 th = (struct tcphdr *)(ip6 + 1); 3300 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3301 break; 3302 3303 default: 3304 break; 3305 } 3306 3307 return (offset); 3308 } 3309 3310 static __inline int 3311 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3312 uint32_t offset) 3313 { 3314 int i; 3315 uint32_t sum, nbds_in_hdr = 1; 3316 uint32_t window; 3317 bus_dma_segment_t *s_seg; 3318 3319 /* If the header spans mulitple segments, skip those segments */ 3320 3321 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3322 return (0); 3323 3324 i = 0; 3325 3326 while ((i < nsegs) && (offset >= segs->ds_len)) { 3327 offset = offset - segs->ds_len; 3328 segs++; 3329 i++; 3330 nbds_in_hdr++; 3331 } 3332 3333 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3334 3335 nsegs = nsegs - i; 3336 3337 while (nsegs >= window) { 3338 3339 sum = 0; 3340 s_seg = segs; 3341 3342 for (i = 0; i < window; i++){ 3343 sum += s_seg->ds_len; 3344 s_seg++; 3345 } 3346 3347 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3348 fp->tx_lso_wnd_min_len++; 3349 return (-1); 3350 } 3351 3352 nsegs = nsegs - 1; 3353 segs++; 3354 } 3355 3356 return (0); 3357 } 3358 3359 static int 3360 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3361 { 3362 bus_dma_segment_t *segs; 3363 bus_dmamap_t map = 0; 3364 uint32_t nsegs = 0; 3365 int ret = -1; 3366 struct mbuf *m_head = *m_headp; 3367 uint16_t idx = 0; 3368 uint16_t elem_left; 3369 3370 uint8_t nbd = 0; 3371 struct qlnx_tx_queue *txq; 3372 3373 struct eth_tx_1st_bd *first_bd; 3374 struct eth_tx_2nd_bd *second_bd; 3375 struct eth_tx_3rd_bd *third_bd; 3376 struct eth_tx_bd *tx_data_bd; 3377 3378 int seg_idx = 0; 3379 uint32_t nbds_in_hdr = 0; 3380 uint32_t offset = 0; 3381 3382 #ifdef QLNX_TRACE_PERF_DATA 3383 uint16_t bd_used; 3384 #endif 3385 3386 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3387 3388 if (!ha->link_up) 3389 return (-1); 3390 3391 first_bd = NULL; 3392 second_bd = NULL; 3393 third_bd = NULL; 3394 tx_data_bd = NULL; 3395 3396 txq = fp->txq[0]; 
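/*
 * Transmit path in brief: verify the BD chain has at least
 * QLNX_TX_ELEM_MIN_THRESH free elements, DMA-map the mbuf chain,
 * fall back to m_defrag() when the mapping returns EFBIG or the
 * segment count exceeds the non-TSO limit (or a TSO frame fails the
 * LSO window check), then build the 1st/2nd/3rd BDs plus data BDs
 * and ring the doorbell with the new producer index.
 */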
3397 3398 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3399 QLNX_TX_ELEM_MIN_THRESH) { 3400 3401 fp->tx_nsegs_gt_elem_left++; 3402 fp->err_tx_nsegs_gt_elem_left++; 3403 3404 return (ENOBUFS); 3405 } 3406 3407 idx = txq->sw_tx_prod; 3408 3409 map = txq->sw_tx_ring[idx].map; 3410 segs = txq->segs; 3411 3412 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3413 BUS_DMA_NOWAIT); 3414 3415 if (ha->dbg_trace_tso_pkt_len) { 3416 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3417 if (!fp->tx_tso_min_pkt_len) { 3418 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3419 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3420 } else { 3421 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3422 fp->tx_tso_min_pkt_len = 3423 m_head->m_pkthdr.len; 3424 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3425 fp->tx_tso_max_pkt_len = 3426 m_head->m_pkthdr.len; 3427 } 3428 } 3429 } 3430 3431 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3432 offset = qlnx_tcp_offset(ha, m_head); 3433 3434 if ((ret == EFBIG) || 3435 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3436 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3437 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3438 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3439 3440 struct mbuf *m; 3441 3442 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3443 3444 fp->tx_defrag++; 3445 3446 m = m_defrag(m_head, M_NOWAIT); 3447 if (m == NULL) { 3448 fp->err_tx_defrag++; 3449 fp->tx_pkts_freed++; 3450 m_freem(m_head); 3451 *m_headp = NULL; 3452 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3453 return (ENOBUFS); 3454 } 3455 3456 m_head = m; 3457 *m_headp = m_head; 3458 3459 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3460 segs, &nsegs, BUS_DMA_NOWAIT))) { 3461 3462 fp->err_tx_defrag_dmamap_load++; 3463 3464 QL_DPRINT1(ha, 3465 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3466 ret, m_head->m_pkthdr.len); 3467 3468 fp->tx_pkts_freed++; 3469 m_freem(m_head); 3470 *m_headp = NULL; 3471 3472 return (ret); 3473 } 3474 3475 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3476 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3477 3478 fp->err_tx_non_tso_max_seg++; 3479 3480 QL_DPRINT1(ha, 3481 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3482 ret, nsegs, m_head->m_pkthdr.len); 3483 3484 fp->tx_pkts_freed++; 3485 m_freem(m_head); 3486 *m_headp = NULL; 3487 3488 return (ret); 3489 } 3490 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3491 offset = qlnx_tcp_offset(ha, m_head); 3492 3493 } else if (ret) { 3494 3495 fp->err_tx_dmamap_load++; 3496 3497 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3498 ret, m_head->m_pkthdr.len); 3499 fp->tx_pkts_freed++; 3500 m_freem(m_head); 3501 *m_headp = NULL; 3502 return (ret); 3503 } 3504 3505 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3506 3507 if (ha->dbg_trace_tso_pkt_len) { 3508 if (nsegs < QLNX_FP_MAX_SEGS) 3509 fp->tx_pkts[(nsegs - 1)]++; 3510 else 3511 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3512 } 3513 3514 #ifdef QLNX_TRACE_PERF_DATA 3515 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3516 if(m_head->m_pkthdr.len <= 2048) 3517 fp->tx_pkts_hist[0]++; 3518 else if((m_head->m_pkthdr.len > 2048) && 3519 (m_head->m_pkthdr.len <= 4096)) 3520 fp->tx_pkts_hist[1]++; 3521 else if((m_head->m_pkthdr.len > 4096) && 3522 (m_head->m_pkthdr.len <= 8192)) 3523 fp->tx_pkts_hist[2]++; 3524 else if((m_head->m_pkthdr.len > 8192) && 3525 (m_head->m_pkthdr.len <= 12288 )) 3526 fp->tx_pkts_hist[3]++; 3527 else if((m_head->m_pkthdr.len > 11288) && 3528 (m_head->m_pkthdr.len <= 16394)) 
3529 fp->tx_pkts_hist[4]++; 3530 else if((m_head->m_pkthdr.len > 16384) && 3531 (m_head->m_pkthdr.len <= 20480)) 3532 fp->tx_pkts_hist[5]++; 3533 else if((m_head->m_pkthdr.len > 20480) && 3534 (m_head->m_pkthdr.len <= 24576)) 3535 fp->tx_pkts_hist[6]++; 3536 else if((m_head->m_pkthdr.len > 24576) && 3537 (m_head->m_pkthdr.len <= 28672)) 3538 fp->tx_pkts_hist[7]++; 3539 else if((m_head->m_pkthdr.len > 28762) && 3540 (m_head->m_pkthdr.len <= 32768)) 3541 fp->tx_pkts_hist[8]++; 3542 else if((m_head->m_pkthdr.len > 32768) && 3543 (m_head->m_pkthdr.len <= 36864)) 3544 fp->tx_pkts_hist[9]++; 3545 else if((m_head->m_pkthdr.len > 36864) && 3546 (m_head->m_pkthdr.len <= 40960)) 3547 fp->tx_pkts_hist[10]++; 3548 else if((m_head->m_pkthdr.len > 40960) && 3549 (m_head->m_pkthdr.len <= 45056)) 3550 fp->tx_pkts_hist[11]++; 3551 else if((m_head->m_pkthdr.len > 45056) && 3552 (m_head->m_pkthdr.len <= 49152)) 3553 fp->tx_pkts_hist[12]++; 3554 else if((m_head->m_pkthdr.len > 49512) && 3555 (m_head->m_pkthdr.len <= 53248)) 3556 fp->tx_pkts_hist[13]++; 3557 else if((m_head->m_pkthdr.len > 53248) && 3558 (m_head->m_pkthdr.len <= 57344)) 3559 fp->tx_pkts_hist[14]++; 3560 else if((m_head->m_pkthdr.len > 53248) && 3561 (m_head->m_pkthdr.len <= 57344)) 3562 fp->tx_pkts_hist[15]++; 3563 else if((m_head->m_pkthdr.len > 57344) && 3564 (m_head->m_pkthdr.len <= 61440)) 3565 fp->tx_pkts_hist[16]++; 3566 else 3567 fp->tx_pkts_hist[17]++; 3568 } 3569 3570 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3571 3572 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3573 bd_used = TX_RING_SIZE - elem_left; 3574 3575 if(bd_used <= 100) 3576 fp->tx_pkts_q[0]++; 3577 else if((bd_used > 100) && (bd_used <= 500)) 3578 fp->tx_pkts_q[1]++; 3579 else if((bd_used > 500) && (bd_used <= 1000)) 3580 fp->tx_pkts_q[2]++; 3581 else if((bd_used > 1000) && (bd_used <= 2000)) 3582 fp->tx_pkts_q[3]++; 3583 else if((bd_used > 3000) && (bd_used <= 4000)) 3584 fp->tx_pkts_q[4]++; 3585 else if((bd_used > 4000) && (bd_used <= 5000)) 3586 fp->tx_pkts_q[5]++; 3587 else if((bd_used > 6000) && (bd_used <= 7000)) 3588 fp->tx_pkts_q[6]++; 3589 else if((bd_used > 7000) && (bd_used <= 8000)) 3590 fp->tx_pkts_q[7]++; 3591 else if((bd_used > 8000) && (bd_used <= 9000)) 3592 fp->tx_pkts_q[8]++; 3593 else if((bd_used > 9000) && (bd_used <= 10000)) 3594 fp->tx_pkts_q[9]++; 3595 else if((bd_used > 10000) && (bd_used <= 11000)) 3596 fp->tx_pkts_q[10]++; 3597 else if((bd_used > 11000) && (bd_used <= 12000)) 3598 fp->tx_pkts_q[11]++; 3599 else if((bd_used > 12000) && (bd_used <= 13000)) 3600 fp->tx_pkts_q[12]++; 3601 else if((bd_used > 13000) && (bd_used <= 14000)) 3602 fp->tx_pkts_q[13]++; 3603 else if((bd_used > 14000) && (bd_used <= 15000)) 3604 fp->tx_pkts_q[14]++; 3605 else if((bd_used > 15000) && (bd_used <= 16000)) 3606 fp->tx_pkts_q[15]++; 3607 else 3608 fp->tx_pkts_q[16]++; 3609 } 3610 3611 #endif /* end of QLNX_TRACE_PERF_DATA */ 3612 3613 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3614 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3615 3616 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs" 3617 " in chain[%d] trying to free packets\n", 3618 nsegs, elem_left, fp->rss_id); 3619 3620 fp->tx_nsegs_gt_elem_left++; 3621 3622 (void)qlnx_tx_int(ha, fp, txq); 3623 3624 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3625 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3626 3627 QL_DPRINT1(ha, 3628 "(%d, 0x%x) insufficient BDs in chain[%d]\n", 3629 nsegs, elem_left, fp->rss_id); 3630 3631 fp->err_tx_nsegs_gt_elem_left++; 3632 fp->tx_ring_full = 1; 3633 if
(ha->storm_stats_enable) 3634 ha->storm_stats_gather = 1; 3635 return (ENOBUFS); 3636 } 3637 } 3638 3639 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3640 3641 txq->sw_tx_ring[idx].mp = m_head; 3642 3643 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3644 3645 memset(first_bd, 0, sizeof(*first_bd)); 3646 3647 first_bd->data.bd_flags.bitfields = 3648 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3649 3650 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3651 3652 nbd++; 3653 3654 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3655 first_bd->data.bd_flags.bitfields |= 3656 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3657 } 3658 3659 if (m_head->m_pkthdr.csum_flags & 3660 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3661 first_bd->data.bd_flags.bitfields |= 3662 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3663 } 3664 3665 if (m_head->m_flags & M_VLANTAG) { 3666 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3667 first_bd->data.bd_flags.bitfields |= 3668 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3669 } 3670 3671 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3672 3673 first_bd->data.bd_flags.bitfields |= 3674 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3675 first_bd->data.bd_flags.bitfields |= 3676 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3677 3678 nbds_in_hdr = 1; 3679 3680 if (offset == segs->ds_len) { 3681 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3682 segs++; 3683 seg_idx++; 3684 3685 second_bd = (struct eth_tx_2nd_bd *) 3686 ecore_chain_produce(&txq->tx_pbl); 3687 memset(second_bd, 0, sizeof(*second_bd)); 3688 nbd++; 3689 3690 if (seg_idx < nsegs) { 3691 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3692 (segs->ds_addr), (segs->ds_len)); 3693 segs++; 3694 seg_idx++; 3695 } 3696 3697 third_bd = (struct eth_tx_3rd_bd *) 3698 ecore_chain_produce(&txq->tx_pbl); 3699 memset(third_bd, 0, sizeof(*third_bd)); 3700 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3701 third_bd->data.bitfields |= 3702 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3703 nbd++; 3704 3705 if (seg_idx < nsegs) { 3706 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3707 (segs->ds_addr), (segs->ds_len)); 3708 segs++; 3709 seg_idx++; 3710 } 3711 3712 for (; seg_idx < nsegs; seg_idx++) { 3713 tx_data_bd = (struct eth_tx_bd *) 3714 ecore_chain_produce(&txq->tx_pbl); 3715 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3716 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3717 segs->ds_addr,\ 3718 segs->ds_len); 3719 segs++; 3720 nbd++; 3721 } 3722 3723 } else if (offset < segs->ds_len) { 3724 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3725 3726 second_bd = (struct eth_tx_2nd_bd *) 3727 ecore_chain_produce(&txq->tx_pbl); 3728 memset(second_bd, 0, sizeof(*second_bd)); 3729 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3730 (segs->ds_addr + offset),\ 3731 (segs->ds_len - offset)); 3732 nbd++; 3733 segs++; 3734 3735 third_bd = (struct eth_tx_3rd_bd *) 3736 ecore_chain_produce(&txq->tx_pbl); 3737 memset(third_bd, 0, sizeof(*third_bd)); 3738 3739 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3740 segs->ds_addr,\ 3741 segs->ds_len); 3742 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3743 third_bd->data.bitfields |= 3744 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3745 segs++; 3746 nbd++; 3747 3748 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3749 tx_data_bd = (struct eth_tx_bd *) 3750 ecore_chain_produce(&txq->tx_pbl); 3751 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3752 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3753 segs->ds_addr,\ 3754 segs->ds_len); 3755 segs++; 3756 nbd++; 3757 } 3758 3759 } else 
{ 3760 offset = offset - segs->ds_len; 3761 segs++; 3762 3763 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3764 3765 if (offset) 3766 nbds_in_hdr++; 3767 3768 tx_data_bd = (struct eth_tx_bd *) 3769 ecore_chain_produce(&txq->tx_pbl); 3770 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3771 3772 if (second_bd == NULL) { 3773 second_bd = (struct eth_tx_2nd_bd *) 3774 tx_data_bd; 3775 } else if (third_bd == NULL) { 3776 third_bd = (struct eth_tx_3rd_bd *) 3777 tx_data_bd; 3778 } 3779 3780 if (offset && (offset < segs->ds_len)) { 3781 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3782 segs->ds_addr, offset); 3783 3784 tx_data_bd = (struct eth_tx_bd *) 3785 ecore_chain_produce(&txq->tx_pbl); 3786 3787 memset(tx_data_bd, 0, 3788 sizeof(*tx_data_bd)); 3789 3790 if (second_bd == NULL) { 3791 second_bd = 3792 (struct eth_tx_2nd_bd *)tx_data_bd; 3793 } else if (third_bd == NULL) { 3794 third_bd = 3795 (struct eth_tx_3rd_bd *)tx_data_bd; 3796 } 3797 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3798 (segs->ds_addr + offset), \ 3799 (segs->ds_len - offset)); 3800 nbd++; 3801 offset = 0; 3802 } else { 3803 if (offset) 3804 offset = offset - segs->ds_len; 3805 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3806 segs->ds_addr, segs->ds_len); 3807 } 3808 segs++; 3809 nbd++; 3810 } 3811 3812 if (third_bd == NULL) { 3813 third_bd = (struct eth_tx_3rd_bd *) 3814 ecore_chain_produce(&txq->tx_pbl); 3815 memset(third_bd, 0, sizeof(*third_bd)); 3816 } 3817 3818 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3819 third_bd->data.bitfields |= 3820 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3821 } 3822 fp->tx_tso_pkts++; 3823 } else { 3824 segs++; 3825 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3826 tx_data_bd = (struct eth_tx_bd *) 3827 ecore_chain_produce(&txq->tx_pbl); 3828 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3829 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3830 segs->ds_len); 3831 segs++; 3832 nbd++; 3833 } 3834 first_bd->data.bitfields = 3835 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3836 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3837 first_bd->data.bitfields = 3838 htole16(first_bd->data.bitfields); 3839 fp->tx_non_tso_pkts++; 3840 } 3841 3842 3843 first_bd->data.nbds = nbd; 3844 3845 if (ha->dbg_trace_tso_pkt_len) { 3846 if (fp->tx_tso_max_nsegs < nsegs) 3847 fp->tx_tso_max_nsegs = nsegs; 3848 3849 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3850 fp->tx_tso_min_nsegs = nsegs; 3851 } 3852 3853 txq->sw_tx_ring[idx].nsegs = nsegs; 3854 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3855 3856 txq->tx_db.data.bd_prod = 3857 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3858 3859 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3860 3861 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3862 return (0); 3863 } 3864 3865 static void 3866 qlnx_stop(qlnx_host_t *ha) 3867 { 3868 struct ifnet *ifp = ha->ifp; 3869 device_t dev; 3870 int i; 3871 3872 dev = ha->pci_dev; 3873 3874 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3875 3876 /* 3877 * We simply lock and unlock each fp->tx_mtx to 3878 * propagate the if_drv_flags 3879 * state to each tx thread 3880 */ 3881 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3882 3883 if (ha->state == QLNX_STATE_OPEN) { 3884 for (i = 0; i < ha->num_rss; i++) { 3885 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3886 3887 mtx_lock(&fp->tx_mtx); 3888 mtx_unlock(&fp->tx_mtx); 3889 3890 if (fp->fp_taskqueue != NULL) 3891 taskqueue_enqueue(fp->fp_taskqueue, 3892 &fp->fp_task); 3893 } 3894 } 3895 #ifdef QLNX_ENABLE_IWARP 3896 if 
(qlnx_vf_device(ha) != 0) { 3897 qlnx_rdma_dev_close(ha); 3898 } 3899 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3900 3901 qlnx_unload(ha); 3902 3903 return; 3904 } 3905 3906 static int 3907 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3908 { 3909 return(TX_RING_SIZE - 1); 3910 } 3911 3912 uint8_t * 3913 qlnx_get_mac_addr(qlnx_host_t *ha) 3914 { 3915 struct ecore_hwfn *p_hwfn; 3916 unsigned char mac[ETHER_ADDR_LEN]; 3917 uint8_t p_is_forced; 3918 3919 p_hwfn = &ha->cdev.hwfns[0]; 3920 3921 if (qlnx_vf_device(ha) != 0) 3922 return (p_hwfn->hw_info.hw_mac_addr); 3923 3924 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3925 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3926 true) { 3927 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3928 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3929 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3930 memcpy(ha->primary_mac, mac, ETH_ALEN); 3931 } 3932 3933 return (ha->primary_mac); 3934 } 3935 3936 static uint32_t 3937 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3938 { 3939 uint32_t ifm_type = 0; 3940 3941 switch (if_link->media_type) { 3942 3943 case MEDIA_MODULE_FIBER: 3944 case MEDIA_UNSPECIFIED: 3945 if (if_link->speed == (100 * 1000)) 3946 ifm_type = QLNX_IFM_100G_SR4; 3947 else if (if_link->speed == (40 * 1000)) 3948 ifm_type = IFM_40G_SR4; 3949 else if (if_link->speed == (25 * 1000)) 3950 ifm_type = QLNX_IFM_25G_SR; 3951 else if (if_link->speed == (10 * 1000)) 3952 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3953 else if (if_link->speed == (1 * 1000)) 3954 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3955 3956 break; 3957 3958 case MEDIA_DA_TWINAX: 3959 if (if_link->speed == (100 * 1000)) 3960 ifm_type = QLNX_IFM_100G_CR4; 3961 else if (if_link->speed == (40 * 1000)) 3962 ifm_type = IFM_40G_CR4; 3963 else if (if_link->speed == (25 * 1000)) 3964 ifm_type = QLNX_IFM_25G_CR; 3965 else if (if_link->speed == (10 * 1000)) 3966 ifm_type = IFM_10G_TWINAX; 3967 3968 break; 3969 3970 default : 3971 ifm_type = IFM_UNKNOWN; 3972 break; 3973 } 3974 return (ifm_type); 3975 } 3976 3977 3978 3979 /***************************************************************************** 3980 * Interrupt Service Functions 3981 *****************************************************************************/ 3982 3983 static int 3984 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3985 struct mbuf *mp_head, uint16_t len) 3986 { 3987 struct mbuf *mp, *mpf, *mpl; 3988 struct sw_rx_data *sw_rx_data; 3989 struct qlnx_rx_queue *rxq; 3990 uint16_t len_in_buffer; 3991 3992 rxq = fp->rxq; 3993 mpf = mpl = mp = NULL; 3994 3995 while (len) { 3996 3997 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3998 3999 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4000 mp = sw_rx_data->data; 4001 4002 if (mp == NULL) { 4003 QL_DPRINT1(ha, "mp = NULL\n"); 4004 fp->err_rx_mp_null++; 4005 rxq->sw_rx_cons = 4006 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4007 4008 if (mpf != NULL) 4009 m_freem(mpf); 4010 4011 return (-1); 4012 } 4013 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4014 BUS_DMASYNC_POSTREAD); 4015 4016 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4017 4018 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4019 " incoming packet and reusing its buffer\n"); 4020 4021 qlnx_reuse_rx_data(rxq); 4022 fp->err_rx_alloc_errors++; 4023 4024 if (mpf != NULL) 4025 m_freem(mpf); 4026 4027 return (-1); 4028 } 4029 ecore_chain_consume(&rxq->rx_bd_ring); 4030 4031 if (len > rxq->rx_buf_size) 4032 len_in_buffer = 
rxq->rx_buf_size; 4033 else 4034 len_in_buffer = len; 4035 4036 len = len - len_in_buffer; 4037 4038 mp->m_flags &= ~M_PKTHDR; 4039 mp->m_next = NULL; 4040 mp->m_len = len_in_buffer; 4041 4042 if (mpf == NULL) 4043 mpf = mpl = mp; 4044 else { 4045 mpl->m_next = mp; 4046 mpl = mp; 4047 } 4048 } 4049 4050 if (mpf != NULL) 4051 mp_head->m_next = mpf; 4052 4053 return (0); 4054 } 4055 4056 static void 4057 qlnx_tpa_start(qlnx_host_t *ha, 4058 struct qlnx_fastpath *fp, 4059 struct qlnx_rx_queue *rxq, 4060 struct eth_fast_path_rx_tpa_start_cqe *cqe) 4061 { 4062 uint32_t agg_index; 4063 struct ifnet *ifp = ha->ifp; 4064 struct mbuf *mp; 4065 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4066 struct sw_rx_data *sw_rx_data; 4067 dma_addr_t addr; 4068 bus_dmamap_t map; 4069 struct eth_rx_bd *rx_bd; 4070 int i; 4071 device_t dev; 4072 #if __FreeBSD_version >= 1100000 4073 uint8_t hash_type; 4074 #endif /* #if __FreeBSD_version >= 1100000 */ 4075 4076 dev = ha->pci_dev; 4077 agg_index = cqe->tpa_agg_index; 4078 4079 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 4080 \t type = 0x%x\n \ 4081 \t bitfields = 0x%x\n \ 4082 \t seg_len = 0x%x\n \ 4083 \t pars_flags = 0x%x\n \ 4084 \t vlan_tag = 0x%x\n \ 4085 \t rss_hash = 0x%x\n \ 4086 \t len_on_first_bd = 0x%x\n \ 4087 \t placement_offset = 0x%x\n \ 4088 \t tpa_agg_index = 0x%x\n \ 4089 \t header_len = 0x%x\n \ 4090 \t ext_bd_len_list[0] = 0x%x\n \ 4091 \t ext_bd_len_list[1] = 0x%x\n \ 4092 \t ext_bd_len_list[2] = 0x%x\n \ 4093 \t ext_bd_len_list[3] = 0x%x\n \ 4094 \t ext_bd_len_list[4] = 0x%x\n", 4095 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 4096 cqe->pars_flags.flags, cqe->vlan_tag, 4097 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 4098 cqe->tpa_agg_index, cqe->header_len, 4099 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 4100 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 4101 cqe->ext_bd_len_list[4]); 4102 4103 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4104 fp->err_rx_tpa_invalid_agg_num++; 4105 return; 4106 } 4107 4108 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4109 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 4110 mp = sw_rx_data->data; 4111 4112 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 4113 4114 if (mp == NULL) { 4115 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 4116 fp->err_rx_mp_null++; 4117 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4118 4119 return; 4120 } 4121 4122 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 4123 4124 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 4125 " flags = %x, dropping incoming packet\n", fp->rss_id, 4126 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 4127 4128 fp->err_rx_hw_errors++; 4129 4130 qlnx_reuse_rx_data(rxq); 4131 4132 QLNX_INC_IERRORS(ifp); 4133 4134 return; 4135 } 4136 4137 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4138 4139 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4140 " dropping incoming packet and reusing its buffer\n", 4141 fp->rss_id); 4142 4143 fp->err_rx_alloc_errors++; 4144 QLNX_INC_IQDROPS(ifp); 4145 4146 /* 4147 * Load the tpa mbuf into the rx ring and save the 4148 * posted mbuf 4149 */ 4150 4151 map = sw_rx_data->map; 4152 addr = sw_rx_data->dma_addr; 4153 4154 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4155 4156 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4157 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4158 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4159 4160 rxq->tpa_info[agg_index].rx_buf.data = mp; 4161 
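/* the received buffer (mp), together with the dma address and map
 * saved above, is parked as the new spare for this aggregation,
 * while the old spare now sits in the sw_rx_ring producer slot and
 * is handed back to the hardware ring just below */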
rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4162 rxq->tpa_info[agg_index].rx_buf.map = map; 4163 4164 rx_bd = (struct eth_rx_bd *) 4165 ecore_chain_produce(&rxq->rx_bd_ring); 4166 4167 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4168 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4169 4170 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4171 BUS_DMASYNC_PREREAD); 4172 4173 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4174 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4175 4176 ecore_chain_consume(&rxq->rx_bd_ring); 4177 4178 /* Now reuse any buffers posted in ext_bd_len_list */ 4179 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4180 4181 if (cqe->ext_bd_len_list[i] == 0) 4182 break; 4183 4184 qlnx_reuse_rx_data(rxq); 4185 } 4186 4187 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4188 return; 4189 } 4190 4191 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4192 4193 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4194 " dropping incoming packet and reusing its buffer\n", 4195 fp->rss_id); 4196 4197 QLNX_INC_IQDROPS(ifp); 4198 4199 /* if we already have mbuf head in aggregation free it */ 4200 if (rxq->tpa_info[agg_index].mpf) { 4201 m_freem(rxq->tpa_info[agg_index].mpf); 4202 rxq->tpa_info[agg_index].mpl = NULL; 4203 } 4204 rxq->tpa_info[agg_index].mpf = mp; 4205 rxq->tpa_info[agg_index].mpl = NULL; 4206 4207 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4208 ecore_chain_consume(&rxq->rx_bd_ring); 4209 4210 /* Now reuse any buffers posted in ext_bd_len_list */ 4211 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4212 4213 if (cqe->ext_bd_len_list[i] == 0) 4214 break; 4215 4216 qlnx_reuse_rx_data(rxq); 4217 } 4218 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4219 4220 return; 4221 } 4222 4223 /* 4224 * first process the ext_bd_len_list 4225 * if this fails then we simply drop the packet 4226 */ 4227 ecore_chain_consume(&rxq->rx_bd_ring); 4228 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4229 4230 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4231 4232 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4233 4234 if (cqe->ext_bd_len_list[i] == 0) 4235 break; 4236 4237 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4238 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4239 BUS_DMASYNC_POSTREAD); 4240 4241 mpc = sw_rx_data->data; 4242 4243 if (mpc == NULL) { 4244 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4245 fp->err_rx_mp_null++; 4246 if (mpf != NULL) 4247 m_freem(mpf); 4248 mpf = mpl = NULL; 4249 rxq->tpa_info[agg_index].agg_state = 4250 QLNX_AGG_STATE_ERROR; 4251 ecore_chain_consume(&rxq->rx_bd_ring); 4252 rxq->sw_rx_cons = 4253 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4254 continue; 4255 } 4256 4257 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4258 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4259 " dropping incoming packet and reusing its" 4260 " buffer\n", fp->rss_id); 4261 4262 qlnx_reuse_rx_data(rxq); 4263 4264 if (mpf != NULL) 4265 m_freem(mpf); 4266 mpf = mpl = NULL; 4267 4268 rxq->tpa_info[agg_index].agg_state = 4269 QLNX_AGG_STATE_ERROR; 4270 4271 ecore_chain_consume(&rxq->rx_bd_ring); 4272 rxq->sw_rx_cons = 4273 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4274 4275 continue; 4276 } 4277 4278 mpc->m_flags &= ~M_PKTHDR; 4279 mpc->m_next = NULL; 4280 mpc->m_len = cqe->ext_bd_len_list[i]; 4281 4282 4283 if (mpf == NULL) { 4284 mpf = mpl = mpc; 4285 } else { 4286 mpl->m_len = ha->rx_buf_size; 4287 mpl->m_next = mpc; 4288 
mpl = mpc; 4289 } 4290 4291 ecore_chain_consume(&rxq->rx_bd_ring); 4292 rxq->sw_rx_cons = 4293 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4294 } 4295 4296 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4297 4298 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4299 " incoming packet and reusing its buffer\n", 4300 fp->rss_id); 4301 4302 QLNX_INC_IQDROPS(ifp); 4303 4304 rxq->tpa_info[agg_index].mpf = mp; 4305 rxq->tpa_info[agg_index].mpl = NULL; 4306 4307 return; 4308 } 4309 4310 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 4311 4312 if (mpf != NULL) { 4313 mp->m_len = ha->rx_buf_size; 4314 mp->m_next = mpf; 4315 rxq->tpa_info[agg_index].mpf = mp; 4316 rxq->tpa_info[agg_index].mpl = mpl; 4317 } else { 4318 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4319 rxq->tpa_info[agg_index].mpf = mp; 4320 rxq->tpa_info[agg_index].mpl = mp; 4321 mp->m_next = NULL; 4322 } 4323 4324 mp->m_flags |= M_PKTHDR; 4325 4326 /* assign packet to this interface interface */ 4327 mp->m_pkthdr.rcvif = ifp; 4328 4329 /* assume no hardware checksum has complated */ 4330 mp->m_pkthdr.csum_flags = 0; 4331 4332 //mp->m_pkthdr.flowid = fp->rss_id; 4333 mp->m_pkthdr.flowid = cqe->rss_hash; 4334 4335 #if __FreeBSD_version >= 1100000 4336 4337 hash_type = cqe->bitfields & 4338 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4339 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4340 4341 switch (hash_type) { 4342 4343 case RSS_HASH_TYPE_IPV4: 4344 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4345 break; 4346 4347 case RSS_HASH_TYPE_TCP_IPV4: 4348 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4349 break; 4350 4351 case RSS_HASH_TYPE_IPV6: 4352 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4353 break; 4354 4355 case RSS_HASH_TYPE_TCP_IPV6: 4356 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4357 break; 4358 4359 default: 4360 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4361 break; 4362 } 4363 4364 #else 4365 mp->m_flags |= M_FLOWID; 4366 #endif 4367 4368 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4369 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4370 4371 mp->m_pkthdr.csum_data = 0xFFFF; 4372 4373 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4374 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4375 mp->m_flags |= M_VLANTAG; 4376 } 4377 4378 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4379 4380 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4381 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4382 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4383 4384 return; 4385 } 4386 4387 static void 4388 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4389 struct qlnx_rx_queue *rxq, 4390 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4391 { 4392 struct sw_rx_data *sw_rx_data; 4393 int i; 4394 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4395 struct mbuf *mp; 4396 uint32_t agg_index; 4397 device_t dev; 4398 4399 dev = ha->pci_dev; 4400 4401 QL_DPRINT7(ha, "[%d]: enter\n \ 4402 \t type = 0x%x\n \ 4403 \t tpa_agg_index = 0x%x\n \ 4404 \t len_list[0] = 0x%x\n \ 4405 \t len_list[1] = 0x%x\n \ 4406 \t len_list[2] = 0x%x\n \ 4407 \t len_list[3] = 0x%x\n \ 4408 \t len_list[4] = 0x%x\n \ 4409 \t len_list[5] = 0x%x\n", 4410 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4411 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4412 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4413 4414 agg_index = cqe->tpa_agg_index; 4415 4416 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4417 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4418 
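		/*
		 * The aggregation index reported by the CQE is beyond what
		 * the firmware supports (ETH_TPA_MAX_AGGS_NUM); count the
		 * event and ignore the CQE.
		 */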
fp->err_rx_tpa_invalid_agg_num++; 4419 return; 4420 } 4421 4422 4423 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4424 4425 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4426 4427 if (cqe->len_list[i] == 0) 4428 break; 4429 4430 if (rxq->tpa_info[agg_index].agg_state != 4431 QLNX_AGG_STATE_START) { 4432 qlnx_reuse_rx_data(rxq); 4433 continue; 4434 } 4435 4436 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4437 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4438 BUS_DMASYNC_POSTREAD); 4439 4440 mpc = sw_rx_data->data; 4441 4442 if (mpc == NULL) { 4443 4444 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4445 4446 fp->err_rx_mp_null++; 4447 if (mpf != NULL) 4448 m_freem(mpf); 4449 mpf = mpl = NULL; 4450 rxq->tpa_info[agg_index].agg_state = 4451 QLNX_AGG_STATE_ERROR; 4452 ecore_chain_consume(&rxq->rx_bd_ring); 4453 rxq->sw_rx_cons = 4454 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4455 continue; 4456 } 4457 4458 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4459 4460 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4461 " dropping incoming packet and reusing its" 4462 " buffer\n", fp->rss_id); 4463 4464 qlnx_reuse_rx_data(rxq); 4465 4466 if (mpf != NULL) 4467 m_freem(mpf); 4468 mpf = mpl = NULL; 4469 4470 rxq->tpa_info[agg_index].agg_state = 4471 QLNX_AGG_STATE_ERROR; 4472 4473 ecore_chain_consume(&rxq->rx_bd_ring); 4474 rxq->sw_rx_cons = 4475 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4476 4477 continue; 4478 } 4479 4480 mpc->m_flags &= ~M_PKTHDR; 4481 mpc->m_next = NULL; 4482 mpc->m_len = cqe->len_list[i]; 4483 4484 4485 if (mpf == NULL) { 4486 mpf = mpl = mpc; 4487 } else { 4488 mpl->m_len = ha->rx_buf_size; 4489 mpl->m_next = mpc; 4490 mpl = mpc; 4491 } 4492 4493 ecore_chain_consume(&rxq->rx_bd_ring); 4494 rxq->sw_rx_cons = 4495 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4496 } 4497 4498 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4499 fp->rss_id, mpf, mpl); 4500 4501 if (mpf != NULL) { 4502 mp = rxq->tpa_info[agg_index].mpl; 4503 mp->m_len = ha->rx_buf_size; 4504 mp->m_next = mpf; 4505 rxq->tpa_info[agg_index].mpl = mpl; 4506 } 4507 4508 return; 4509 } 4510 4511 static int 4512 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4513 struct qlnx_rx_queue *rxq, 4514 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4515 { 4516 struct sw_rx_data *sw_rx_data; 4517 int i; 4518 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4519 struct mbuf *mp; 4520 uint32_t agg_index; 4521 uint32_t len = 0; 4522 struct ifnet *ifp = ha->ifp; 4523 device_t dev; 4524 4525 dev = ha->pci_dev; 4526 4527 QL_DPRINT7(ha, "[%d]: enter\n \ 4528 \t type = 0x%x\n \ 4529 \t tpa_agg_index = 0x%x\n \ 4530 \t total_packet_len = 0x%x\n \ 4531 \t num_of_bds = 0x%x\n \ 4532 \t end_reason = 0x%x\n \ 4533 \t num_of_coalesced_segs = 0x%x\n \ 4534 \t ts_delta = 0x%x\n \ 4535 \t len_list[0] = 0x%x\n \ 4536 \t len_list[1] = 0x%x\n \ 4537 \t len_list[2] = 0x%x\n \ 4538 \t len_list[3] = 0x%x\n", 4539 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4540 cqe->total_packet_len, cqe->num_of_bds, 4541 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4542 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4543 cqe->len_list[3]); 4544 4545 agg_index = cqe->tpa_agg_index; 4546 4547 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4548 4549 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4550 4551 fp->err_rx_tpa_invalid_agg_num++; 4552 return (0); 4553 } 4554 4555 4556 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4557 4558 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4559 4560 if (cqe->len_list[i] == 0) 4561 break; 4562 4563 
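		/*
		 * If this aggregation already hit an error, its state is no
		 * longer QLNX_AGG_STATE_START; in that case the buffers
		 * listed in len_list are only recycled back onto the rx ring
		 * instead of being chained onto the packet.
		 */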
if (rxq->tpa_info[agg_index].agg_state != 4564 QLNX_AGG_STATE_START) { 4565 4566 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4567 4568 qlnx_reuse_rx_data(rxq); 4569 continue; 4570 } 4571 4572 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4573 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4574 BUS_DMASYNC_POSTREAD); 4575 4576 mpc = sw_rx_data->data; 4577 4578 if (mpc == NULL) { 4579 4580 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4581 4582 fp->err_rx_mp_null++; 4583 if (mpf != NULL) 4584 m_freem(mpf); 4585 mpf = mpl = NULL; 4586 rxq->tpa_info[agg_index].agg_state = 4587 QLNX_AGG_STATE_ERROR; 4588 ecore_chain_consume(&rxq->rx_bd_ring); 4589 rxq->sw_rx_cons = 4590 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4591 continue; 4592 } 4593 4594 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4595 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4596 " dropping incoming packet and reusing its" 4597 " buffer\n", fp->rss_id); 4598 4599 qlnx_reuse_rx_data(rxq); 4600 4601 if (mpf != NULL) 4602 m_freem(mpf); 4603 mpf = mpl = NULL; 4604 4605 rxq->tpa_info[agg_index].agg_state = 4606 QLNX_AGG_STATE_ERROR; 4607 4608 ecore_chain_consume(&rxq->rx_bd_ring); 4609 rxq->sw_rx_cons = 4610 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4611 4612 continue; 4613 } 4614 4615 mpc->m_flags &= ~M_PKTHDR; 4616 mpc->m_next = NULL; 4617 mpc->m_len = cqe->len_list[i]; 4618 4619 4620 if (mpf == NULL) { 4621 mpf = mpl = mpc; 4622 } else { 4623 mpl->m_len = ha->rx_buf_size; 4624 mpl->m_next = mpc; 4625 mpl = mpc; 4626 } 4627 4628 ecore_chain_consume(&rxq->rx_bd_ring); 4629 rxq->sw_rx_cons = 4630 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4631 } 4632 4633 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4634 4635 if (mpf != NULL) { 4636 4637 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4638 4639 mp = rxq->tpa_info[agg_index].mpl; 4640 mp->m_len = ha->rx_buf_size; 4641 mp->m_next = mpf; 4642 } 4643 4644 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4645 4646 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4647 4648 if (rxq->tpa_info[agg_index].mpf != NULL) 4649 m_freem(rxq->tpa_info[agg_index].mpf); 4650 rxq->tpa_info[agg_index].mpf = NULL; 4651 rxq->tpa_info[agg_index].mpl = NULL; 4652 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4653 return (0); 4654 } 4655 4656 mp = rxq->tpa_info[agg_index].mpf; 4657 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4658 mp->m_pkthdr.len = cqe->total_packet_len; 4659 4660 if (mp->m_next == NULL) 4661 mp->m_len = mp->m_pkthdr.len; 4662 else { 4663 /* compute the total packet length */ 4664 mpf = mp; 4665 while (mpf != NULL) { 4666 len += mpf->m_len; 4667 mpf = mpf->m_next; 4668 } 4669 4670 if (cqe->total_packet_len > len) { 4671 mpl = rxq->tpa_info[agg_index].mpl; 4672 mpl->m_len += (cqe->total_packet_len - len); 4673 } 4674 } 4675 4676 QLNX_INC_IPACKETS(ifp); 4677 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4678 4679 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4680 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4681 fp->rss_id, mp->m_pkthdr.csum_data, 4682 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4683 4684 (*ifp->if_input)(ifp, mp); 4685 4686 rxq->tpa_info[agg_index].mpf = NULL; 4687 rxq->tpa_info[agg_index].mpl = NULL; 4688 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4689 4690 return (cqe->num_of_coalesced_segs); 4691 } 4692 4693 static int 4694 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4695 int lro_enable) 4696 { 4697 uint16_t hw_comp_cons, sw_comp_cons; 4698 int rx_pkt = 0; 4699 struct 
qlnx_rx_queue *rxq = fp->rxq; 4700 struct ifnet *ifp = ha->ifp; 4701 struct ecore_dev *cdev = &ha->cdev; 4702 struct ecore_hwfn *p_hwfn; 4703 4704 #ifdef QLNX_SOFT_LRO 4705 struct lro_ctrl *lro; 4706 4707 lro = &rxq->lro; 4708 #endif /* #ifdef QLNX_SOFT_LRO */ 4709 4710 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4711 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4712 4713 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4714 4715 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4716 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4717 * read before it is written by FW, then FW writes CQE and SB, and then 4718 * the CPU reads the hw_comp_cons, it will use an old CQE. 4719 */ 4720 4721 /* Loop to complete all indicated BDs */ 4722 while (sw_comp_cons != hw_comp_cons) { 4723 union eth_rx_cqe *cqe; 4724 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4725 struct sw_rx_data *sw_rx_data; 4726 register struct mbuf *mp; 4727 enum eth_rx_cqe_type cqe_type; 4728 uint16_t len, pad, len_on_first_bd; 4729 uint8_t *data; 4730 #if __FreeBSD_version >= 1100000 4731 uint8_t hash_type; 4732 #endif /* #if __FreeBSD_version >= 1100000 */ 4733 4734 /* Get the CQE from the completion ring */ 4735 cqe = (union eth_rx_cqe *) 4736 ecore_chain_consume(&rxq->rx_comp_ring); 4737 cqe_type = cqe->fast_path_regular.type; 4738 4739 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4740 QL_DPRINT3(ha, "Got a slowath CQE\n"); 4741 4742 ecore_eth_cqe_completion(p_hwfn, 4743 (struct eth_slow_path_rx_cqe *)cqe); 4744 goto next_cqe; 4745 } 4746 4747 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4748 4749 switch (cqe_type) { 4750 4751 case ETH_RX_CQE_TYPE_TPA_START: 4752 qlnx_tpa_start(ha, fp, rxq, 4753 &cqe->fast_path_tpa_start); 4754 fp->tpa_start++; 4755 break; 4756 4757 case ETH_RX_CQE_TYPE_TPA_CONT: 4758 qlnx_tpa_cont(ha, fp, rxq, 4759 &cqe->fast_path_tpa_cont); 4760 fp->tpa_cont++; 4761 break; 4762 4763 case ETH_RX_CQE_TYPE_TPA_END: 4764 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4765 &cqe->fast_path_tpa_end); 4766 fp->tpa_end++; 4767 break; 4768 4769 default: 4770 break; 4771 } 4772 4773 goto next_cqe; 4774 } 4775 4776 /* Get the data from the SW ring */ 4777 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4778 mp = sw_rx_data->data; 4779 4780 if (mp == NULL) { 4781 QL_DPRINT1(ha, "mp = NULL\n"); 4782 fp->err_rx_mp_null++; 4783 rxq->sw_rx_cons = 4784 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4785 goto next_cqe; 4786 } 4787 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4788 BUS_DMASYNC_POSTREAD); 4789 4790 /* non GRO */ 4791 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4792 len = le16toh(fp_cqe->pkt_len); 4793 pad = fp_cqe->placement_offset; 4794 #if 0 4795 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4796 " len %u, parsing flags = %d pad = %d\n", 4797 cqe_type, fp_cqe->bitfields, 4798 le16toh(fp_cqe->vlan_tag), 4799 len, le16toh(fp_cqe->pars_flags.flags), pad); 4800 #endif 4801 data = mtod(mp, uint8_t *); 4802 data = data + pad; 4803 4804 if (0) 4805 qlnx_dump_buf8(ha, __func__, data, len); 4806 4807 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4808 * is always with a fixed size. If allocation fails, we take the 4809 * consumed BD and return it to the ring in the PROD position. 4810 * The packet that was received on that BD will be dropped (and 4811 * not passed to the upper stack). 
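		 * When that happens fp->err_rx_alloc_errors is incremented and
		 * the interface input-drop counter (iqdrops) is bumped.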
4812 */ 4813 /* If this is an error packet then drop it */ 4814 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4815 CQE_FLAGS_ERR) { 4816 4817 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4818 " dropping incoming packet\n", sw_comp_cons, 4819 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4820 fp->err_rx_hw_errors++; 4821 4822 qlnx_reuse_rx_data(rxq); 4823 4824 QLNX_INC_IERRORS(ifp); 4825 4826 goto next_cqe; 4827 } 4828 4829 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4830 4831 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4832 " incoming packet and reusing its buffer\n"); 4833 qlnx_reuse_rx_data(rxq); 4834 4835 fp->err_rx_alloc_errors++; 4836 4837 QLNX_INC_IQDROPS(ifp); 4838 4839 goto next_cqe; 4840 } 4841 4842 ecore_chain_consume(&rxq->rx_bd_ring); 4843 4844 len_on_first_bd = fp_cqe->len_on_first_bd; 4845 m_adj(mp, pad); 4846 mp->m_pkthdr.len = len; 4847 4848 if ((len > 60 ) && (len > len_on_first_bd)) { 4849 4850 mp->m_len = len_on_first_bd; 4851 4852 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4853 (len - len_on_first_bd)) != 0) { 4854 4855 m_freem(mp); 4856 4857 QLNX_INC_IQDROPS(ifp); 4858 4859 goto next_cqe; 4860 } 4861 4862 } else if (len_on_first_bd < len) { 4863 fp->err_rx_jumbo_chain_pkts++; 4864 } else { 4865 mp->m_len = len; 4866 } 4867 4868 mp->m_flags |= M_PKTHDR; 4869 4870 /* assign packet to this interface interface */ 4871 mp->m_pkthdr.rcvif = ifp; 4872 4873 /* assume no hardware checksum has complated */ 4874 mp->m_pkthdr.csum_flags = 0; 4875 4876 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4877 4878 #if __FreeBSD_version >= 1100000 4879 4880 hash_type = fp_cqe->bitfields & 4881 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4882 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4883 4884 switch (hash_type) { 4885 4886 case RSS_HASH_TYPE_IPV4: 4887 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4888 break; 4889 4890 case RSS_HASH_TYPE_TCP_IPV4: 4891 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4892 break; 4893 4894 case RSS_HASH_TYPE_IPV6: 4895 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4896 break; 4897 4898 case RSS_HASH_TYPE_TCP_IPV6: 4899 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4900 break; 4901 4902 default: 4903 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4904 break; 4905 } 4906 4907 #else 4908 mp->m_flags |= M_FLOWID; 4909 #endif 4910 4911 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4912 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4913 } 4914 4915 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4916 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4917 } 4918 4919 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4920 mp->m_pkthdr.csum_data = 0xFFFF; 4921 mp->m_pkthdr.csum_flags |= 4922 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4923 } 4924 4925 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4926 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4927 mp->m_flags |= M_VLANTAG; 4928 } 4929 4930 QLNX_INC_IPACKETS(ifp); 4931 QLNX_INC_IBYTES(ifp, len); 4932 4933 #ifdef QLNX_SOFT_LRO 4934 4935 if (lro_enable) { 4936 4937 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4938 4939 tcp_lro_queue_mbuf(lro, mp); 4940 4941 #else 4942 4943 if (tcp_lro_rx(lro, mp, 0)) 4944 (*ifp->if_input)(ifp, mp); 4945 4946 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4947 4948 } else { 4949 (*ifp->if_input)(ifp, mp); 4950 } 4951 #else 4952 4953 (*ifp->if_input)(ifp, mp); 4954 4955 #endif /* #ifdef QLNX_SOFT_LRO */ 4956 4957 rx_pkt++; 4958 4959 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4960 4961 next_cqe: /* don't consume bd 
rx buffer */ 4962 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4963 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4964 4965 /* CR TPA - revisit how to handle budget in TPA perhaps 4966 increase on "end" */ 4967 if (rx_pkt == budget) 4968 break; 4969 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4970 4971 /* Update producers */ 4972 qlnx_update_rx_prod(p_hwfn, rxq); 4973 4974 return rx_pkt; 4975 } 4976 4977 4978 /* 4979 * fast path interrupt 4980 */ 4981 4982 static void 4983 qlnx_fp_isr(void *arg) 4984 { 4985 qlnx_ivec_t *ivec = arg; 4986 qlnx_host_t *ha; 4987 struct qlnx_fastpath *fp = NULL; 4988 int idx; 4989 4990 ha = ivec->ha; 4991 4992 if (ha->state != QLNX_STATE_OPEN) { 4993 return; 4994 } 4995 4996 idx = ivec->rss_idx; 4997 4998 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4999 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 5000 ha->err_illegal_intr++; 5001 return; 5002 } 5003 fp = &ha->fp_array[idx]; 5004 5005 if (fp == NULL) { 5006 ha->err_fp_null++; 5007 } else { 5008 int rx_int = 0, total_rx_count = 0; 5009 int lro_enable, tc; 5010 struct qlnx_tx_queue *txq; 5011 uint16_t elem_left; 5012 5013 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 5014 5015 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 5016 5017 do { 5018 for (tc = 0; tc < ha->num_tc; tc++) { 5019 5020 txq = fp->txq[tc]; 5021 5022 if((int)(elem_left = 5023 ecore_chain_get_elem_left(&txq->tx_pbl)) < 5024 QLNX_TX_ELEM_THRESH) { 5025 5026 if (mtx_trylock(&fp->tx_mtx)) { 5027 #ifdef QLNX_TRACE_PERF_DATA 5028 tx_compl = fp->tx_pkts_completed; 5029 #endif 5030 5031 qlnx_tx_int(ha, fp, fp->txq[tc]); 5032 #ifdef QLNX_TRACE_PERF_DATA 5033 fp->tx_pkts_compl_intr += 5034 (fp->tx_pkts_completed - tx_compl); 5035 if ((fp->tx_pkts_completed - tx_compl) <= 32) 5036 fp->tx_comInt[0]++; 5037 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 5038 ((fp->tx_pkts_completed - tx_compl) <= 64)) 5039 fp->tx_comInt[1]++; 5040 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 5041 ((fp->tx_pkts_completed - tx_compl) <= 128)) 5042 fp->tx_comInt[2]++; 5043 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 5044 fp->tx_comInt[3]++; 5045 #endif 5046 mtx_unlock(&fp->tx_mtx); 5047 } 5048 } 5049 } 5050 5051 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 5052 lro_enable); 5053 5054 if (rx_int) { 5055 fp->rx_pkts += rx_int; 5056 total_rx_count += rx_int; 5057 } 5058 5059 } while (rx_int); 5060 5061 #ifdef QLNX_SOFT_LRO 5062 { 5063 struct lro_ctrl *lro; 5064 5065 lro = &fp->rxq->lro; 5066 5067 if (lro_enable && total_rx_count) { 5068 5069 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5070 5071 #ifdef QLNX_TRACE_LRO_CNT 5072 if (lro->lro_mbuf_count & ~1023) 5073 fp->lro_cnt_1024++; 5074 else if (lro->lro_mbuf_count & ~511) 5075 fp->lro_cnt_512++; 5076 else if (lro->lro_mbuf_count & ~255) 5077 fp->lro_cnt_256++; 5078 else if (lro->lro_mbuf_count & ~127) 5079 fp->lro_cnt_128++; 5080 else if (lro->lro_mbuf_count & ~63) 5081 fp->lro_cnt_64++; 5082 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 5083 5084 tcp_lro_flush_all(lro); 5085 5086 #else 5087 struct lro_entry *queued; 5088 5089 while ((!SLIST_EMPTY(&lro->lro_active))) { 5090 queued = SLIST_FIRST(&lro->lro_active); 5091 SLIST_REMOVE_HEAD(&lro->lro_active, \ 5092 next); 5093 tcp_lro_flush(lro, queued); 5094 } 5095 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5096 } 5097 } 5098 #endif /* #ifdef QLNX_SOFT_LRO */ 5099 5100 ecore_sb_update_sb_idx(fp->sb_info); 5101 rmb(); 5102 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 5103 } 
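		/*
		 * Note: the status block index refresh, the rmb() and the
		 * IGU_INT_ENABLE ack above re-arm the fastpath interrupt only
		 * once the tx/rx completion loop has drained; the rmb()
		 * orders the status block read ahead of the ack.
		 */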
5104 5105 return; 5106 } 5107 5108 5109 /* 5110 * slow path interrupt processing function 5111 * can be invoked in polled mode or in interrupt mode via taskqueue. 5112 */ 5113 void 5114 qlnx_sp_isr(void *arg) 5115 { 5116 struct ecore_hwfn *p_hwfn; 5117 qlnx_host_t *ha; 5118 5119 p_hwfn = arg; 5120 5121 ha = (qlnx_host_t *)p_hwfn->p_dev; 5122 5123 ha->sp_interrupts++; 5124 5125 QL_DPRINT2(ha, "enter\n"); 5126 5127 ecore_int_sp_dpc(p_hwfn); 5128 5129 QL_DPRINT2(ha, "exit\n"); 5130 5131 return; 5132 } 5133 5134 /***************************************************************************** 5135 * Support Functions for DMA'able Memory 5136 *****************************************************************************/ 5137 5138 static void 5139 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 5140 { 5141 *((bus_addr_t *)arg) = 0; 5142 5143 if (error) { 5144 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 5145 return; 5146 } 5147 5148 *((bus_addr_t *)arg) = segs[0].ds_addr; 5149 5150 return; 5151 } 5152 5153 static int 5154 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5155 { 5156 int ret = 0; 5157 device_t dev; 5158 bus_addr_t b_addr; 5159 5160 dev = ha->pci_dev; 5161 5162 ret = bus_dma_tag_create( 5163 ha->parent_tag,/* parent */ 5164 dma_buf->alignment, 5165 ((bus_size_t)(1ULL << 32)),/* boundary */ 5166 BUS_SPACE_MAXADDR, /* lowaddr */ 5167 BUS_SPACE_MAXADDR, /* highaddr */ 5168 NULL, NULL, /* filter, filterarg */ 5169 dma_buf->size, /* maxsize */ 5170 1, /* nsegments */ 5171 dma_buf->size, /* maxsegsize */ 5172 0, /* flags */ 5173 NULL, NULL, /* lockfunc, lockarg */ 5174 &dma_buf->dma_tag); 5175 5176 if (ret) { 5177 QL_DPRINT1(ha, "could not create dma tag\n"); 5178 goto qlnx_alloc_dmabuf_exit; 5179 } 5180 ret = bus_dmamem_alloc(dma_buf->dma_tag, 5181 (void **)&dma_buf->dma_b, 5182 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 5183 &dma_buf->dma_map); 5184 if (ret) { 5185 bus_dma_tag_destroy(dma_buf->dma_tag); 5186 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 5187 goto qlnx_alloc_dmabuf_exit; 5188 } 5189 5190 ret = bus_dmamap_load(dma_buf->dma_tag, 5191 dma_buf->dma_map, 5192 dma_buf->dma_b, 5193 dma_buf->size, 5194 qlnx_dmamap_callback, 5195 &b_addr, BUS_DMA_NOWAIT); 5196 5197 if (ret || !b_addr) { 5198 bus_dma_tag_destroy(dma_buf->dma_tag); 5199 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 5200 dma_buf->dma_map); 5201 ret = -1; 5202 goto qlnx_alloc_dmabuf_exit; 5203 } 5204 5205 dma_buf->dma_addr = b_addr; 5206 5207 qlnx_alloc_dmabuf_exit: 5208 5209 return ret; 5210 } 5211 5212 static void 5213 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5214 { 5215 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5216 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5217 bus_dma_tag_destroy(dma_buf->dma_tag); 5218 return; 5219 } 5220 5221 void * 5222 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5223 { 5224 qlnx_dma_t dma_buf; 5225 qlnx_dma_t *dma_p; 5226 qlnx_host_t *ha; 5227 device_t dev; 5228 5229 ha = (qlnx_host_t *)ecore_dev; 5230 dev = ha->pci_dev; 5231 5232 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5233 5234 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5235 5236 dma_buf.size = size + PAGE_SIZE; 5237 dma_buf.alignment = 8; 5238 5239 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5240 return (NULL); 5241 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5242 5243 *phys = dma_buf.dma_addr; 5244 5245 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + 
size); 5246 5247 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5248 5249 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5250 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5251 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5252 5253 return (dma_buf.dma_b); 5254 } 5255 5256 void 5257 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5258 uint32_t size) 5259 { 5260 qlnx_dma_t dma_buf, *dma_p; 5261 qlnx_host_t *ha; 5262 device_t dev; 5263 5264 ha = (qlnx_host_t *)ecore_dev; 5265 dev = ha->pci_dev; 5266 5267 if (v_addr == NULL) 5268 return; 5269 5270 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5271 5272 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5273 5274 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5275 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5276 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5277 5278 dma_buf = *dma_p; 5279 5280 if (!ha->qlnxr_debug) 5281 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5282 return; 5283 } 5284 5285 static int 5286 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5287 { 5288 int ret; 5289 device_t dev; 5290 5291 dev = ha->pci_dev; 5292 5293 /* 5294 * Allocate parent DMA Tag 5295 */ 5296 ret = bus_dma_tag_create( 5297 bus_get_dma_tag(dev), /* parent */ 5298 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5299 BUS_SPACE_MAXADDR, /* lowaddr */ 5300 BUS_SPACE_MAXADDR, /* highaddr */ 5301 NULL, NULL, /* filter, filterarg */ 5302 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5303 0, /* nsegments */ 5304 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5305 0, /* flags */ 5306 NULL, NULL, /* lockfunc, lockarg */ 5307 &ha->parent_tag); 5308 5309 if (ret) { 5310 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5311 return (-1); 5312 } 5313 5314 ha->flags.parent_tag = 1; 5315 5316 return (0); 5317 } 5318 5319 static void 5320 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5321 { 5322 if (ha->parent_tag != NULL) { 5323 bus_dma_tag_destroy(ha->parent_tag); 5324 ha->parent_tag = NULL; 5325 } 5326 return; 5327 } 5328 5329 static int 5330 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5331 { 5332 if (bus_dma_tag_create(NULL, /* parent */ 5333 1, 0, /* alignment, bounds */ 5334 BUS_SPACE_MAXADDR, /* lowaddr */ 5335 BUS_SPACE_MAXADDR, /* highaddr */ 5336 NULL, NULL, /* filter, filterarg */ 5337 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5338 QLNX_MAX_SEGMENTS, /* nsegments */ 5339 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5340 0, /* flags */ 5341 NULL, /* lockfunc */ 5342 NULL, /* lockfuncarg */ 5343 &ha->tx_tag)) { 5344 5345 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5346 return (-1); 5347 } 5348 5349 return (0); 5350 } 5351 5352 static void 5353 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5354 { 5355 if (ha->tx_tag != NULL) { 5356 bus_dma_tag_destroy(ha->tx_tag); 5357 ha->tx_tag = NULL; 5358 } 5359 return; 5360 } 5361 5362 static int 5363 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5364 { 5365 if (bus_dma_tag_create(NULL, /* parent */ 5366 1, 0, /* alignment, bounds */ 5367 BUS_SPACE_MAXADDR, /* lowaddr */ 5368 BUS_SPACE_MAXADDR, /* highaddr */ 5369 NULL, NULL, /* filter, filterarg */ 5370 MJUM9BYTES, /* maxsize */ 5371 1, /* nsegments */ 5372 MJUM9BYTES, /* maxsegsize */ 5373 0, /* flags */ 5374 NULL, /* lockfunc */ 5375 NULL, /* lockfuncarg */ 5376 &ha->rx_tag)) { 5377 5378 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5379 5380 return (-1); 5381 } 5382 return (0); 5383 } 5384 5385 static void 5386 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5387 { 5388 if (ha->rx_tag != NULL) { 5389 bus_dma_tag_destroy(ha->rx_tag); 5390 ha->rx_tag = NULL; 5391 } 5392 return; 5393 } 5394 5395 
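/*
 * Note: the routines that follow are, for the most part, the helpers the
 * common ecore code reaches through the OSAL layer for PCI config space
 * access, BAR/register/doorbell I/O, memory allocation and link handling;
 * they translate the opaque ecore device/hwfn pointer back to the
 * qlnx_host_t and its bus resources.
 */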
/********************************* 5396 * Exported functions 5397 *********************************/ 5398 uint32_t 5399 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5400 { 5401 uint32_t bar_size; 5402 5403 bar_id = bar_id * 2; 5404 5405 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5406 SYS_RES_MEMORY, 5407 PCIR_BAR(bar_id)); 5408 5409 return (bar_size); 5410 } 5411 5412 uint32_t 5413 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5414 { 5415 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5416 pci_reg, 1); 5417 return 0; 5418 } 5419 5420 uint32_t 5421 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5422 uint16_t *reg_value) 5423 { 5424 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5425 pci_reg, 2); 5426 return 0; 5427 } 5428 5429 uint32_t 5430 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5431 uint32_t *reg_value) 5432 { 5433 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5434 pci_reg, 4); 5435 return 0; 5436 } 5437 5438 void 5439 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5440 { 5441 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5442 pci_reg, reg_value, 1); 5443 return; 5444 } 5445 5446 void 5447 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5448 uint16_t reg_value) 5449 { 5450 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5451 pci_reg, reg_value, 2); 5452 return; 5453 } 5454 5455 void 5456 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5457 uint32_t reg_value) 5458 { 5459 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5460 pci_reg, reg_value, 4); 5461 return; 5462 } 5463 5464 int 5465 qlnx_pci_find_capability(void *ecore_dev, int cap) 5466 { 5467 int reg; 5468 qlnx_host_t *ha; 5469 5470 ha = ecore_dev; 5471 5472 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0) 5473 return reg; 5474 else { 5475 QL_DPRINT1(ha, "failed\n"); 5476 return 0; 5477 } 5478 } 5479 5480 int 5481 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5482 { 5483 int reg; 5484 qlnx_host_t *ha; 5485 5486 ha = ecore_dev; 5487 5488 if (pci_find_extcap(ha->pci_dev, ext_cap, ®) == 0) 5489 return reg; 5490 else { 5491 QL_DPRINT1(ha, "failed\n"); 5492 return 0; 5493 } 5494 } 5495 5496 uint32_t 5497 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5498 { 5499 uint32_t data32; 5500 struct ecore_hwfn *p_hwfn; 5501 5502 p_hwfn = hwfn; 5503 5504 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5505 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5506 5507 return (data32); 5508 } 5509 5510 void 5511 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5512 { 5513 struct ecore_hwfn *p_hwfn = hwfn; 5514 5515 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5516 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5517 5518 return; 5519 } 5520 5521 void 5522 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5523 { 5524 struct ecore_hwfn *p_hwfn = hwfn; 5525 5526 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5527 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5528 return; 5529 } 5530 5531 void 5532 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5533 { 5534 struct ecore_dev *cdev; 5535 struct ecore_hwfn *p_hwfn; 5536 uint32_t offset; 5537 5538 p_hwfn = hwfn; 5539 5540 cdev = p_hwfn->p_dev; 5541 5542 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5543 bus_write_4(((qlnx_host_t 
*)cdev)->pci_dbells, offset, value); 5544 5545 return; 5546 } 5547 5548 void 5549 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5550 { 5551 struct ecore_hwfn *p_hwfn = hwfn; 5552 5553 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5554 (bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5555 5556 return; 5557 } 5558 5559 uint32_t 5560 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5561 { 5562 uint32_t data32; 5563 bus_size_t offset; 5564 struct ecore_dev *cdev; 5565 5566 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5567 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5568 5569 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5570 5571 return (data32); 5572 } 5573 5574 void 5575 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5576 { 5577 bus_size_t offset; 5578 struct ecore_dev *cdev; 5579 5580 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5581 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5582 5583 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5584 5585 return; 5586 } 5587 5588 void 5589 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5590 { 5591 bus_size_t offset; 5592 struct ecore_dev *cdev; 5593 5594 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5595 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5596 5597 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5598 return; 5599 } 5600 5601 void * 5602 qlnx_zalloc(uint32_t size) 5603 { 5604 caddr_t va; 5605 5606 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5607 bzero(va, size); 5608 return ((void *)va); 5609 } 5610 5611 void 5612 qlnx_barrier(void *p_hwfn) 5613 { 5614 qlnx_host_t *ha; 5615 5616 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5617 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5618 } 5619 5620 void 5621 qlnx_link_update(void *p_hwfn) 5622 { 5623 qlnx_host_t *ha; 5624 int prev_link_state; 5625 5626 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5627 5628 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5629 5630 prev_link_state = ha->link_up; 5631 ha->link_up = ha->if_link.link_up; 5632 5633 if (prev_link_state != ha->link_up) { 5634 if (ha->link_up) { 5635 if_link_state_change(ha->ifp, LINK_STATE_UP); 5636 } else { 5637 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5638 } 5639 } 5640 #ifndef QLNX_VF 5641 #ifdef CONFIG_ECORE_SRIOV 5642 5643 if (qlnx_vf_device(ha) != 0) { 5644 if (ha->sriov_initialized) 5645 qlnx_inform_vf_link_state(p_hwfn, ha); 5646 } 5647 5648 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5649 #endif /* #ifdef QLNX_VF */ 5650 5651 return; 5652 } 5653 5654 static void 5655 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5656 struct ecore_vf_acquire_sw_info *p_sw_info) 5657 { 5658 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5659 (QLNX_VERSION_MINOR << 16) | 5660 QLNX_VERSION_BUILD; 5661 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5662 5663 return; 5664 } 5665 5666 void 5667 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5668 void *p_sw_info) 5669 { 5670 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5671 5672 return; 5673 } 5674 5675 void 5676 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5677 struct qlnx_link_output *if_link) 5678 { 5679 struct ecore_mcp_link_params link_params; 5680 struct ecore_mcp_link_state link_state; 5681 uint8_t p_change; 5682 struct ecore_ptt *p_ptt = NULL; 5683 5684 5685 memset(if_link, 0, 
sizeof(*if_link)); 5686 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5687 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5688 5689 ha = (qlnx_host_t *)hwfn->p_dev; 5690 5691 /* Prepare source inputs */ 5692 /* we only deal with physical functions */ 5693 if (qlnx_vf_device(ha) != 0) { 5694 5695 p_ptt = ecore_ptt_acquire(hwfn); 5696 5697 if (p_ptt == NULL) { 5698 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5699 return; 5700 } 5701 5702 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5703 ecore_ptt_release(hwfn, p_ptt); 5704 5705 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5706 sizeof(link_params)); 5707 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5708 sizeof(link_state)); 5709 } else { 5710 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5711 ecore_vf_read_bulletin(hwfn, &p_change); 5712 ecore_vf_get_link_params(hwfn, &link_params); 5713 ecore_vf_get_link_state(hwfn, &link_state); 5714 } 5715 5716 /* Set the link parameters to pass to protocol driver */ 5717 if (link_state.link_up) { 5718 if_link->link_up = true; 5719 if_link->speed = link_state.speed; 5720 } 5721 5722 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5723 5724 if (link_params.speed.autoneg) 5725 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5726 5727 if (link_params.pause.autoneg || 5728 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5729 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5730 5731 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5732 link_params.pause.forced_tx) 5733 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5734 5735 if (link_params.speed.advertised_speeds & 5736 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5737 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5738 QLNX_LINK_CAP_1000baseT_Full; 5739 5740 if (link_params.speed.advertised_speeds & 5741 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5742 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5743 5744 if (link_params.speed.advertised_speeds & 5745 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5746 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5747 5748 if (link_params.speed.advertised_speeds & 5749 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5750 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5751 5752 if (link_params.speed.advertised_speeds & 5753 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5754 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5755 5756 if (link_params.speed.advertised_speeds & 5757 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5758 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5759 5760 if_link->advertised_caps = if_link->supported_caps; 5761 5762 if_link->autoneg = link_params.speed.autoneg; 5763 if_link->duplex = QLNX_LINK_DUPLEX; 5764 5765 /* Link partner capabilities */ 5766 5767 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5768 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5769 5770 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5771 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5772 5773 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5774 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5775 5776 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5777 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5778 5779 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5780 if_link->link_partner_caps |= 
QLNX_LINK_CAP_40000baseLR4_Full; 5781 5782 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5783 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5784 5785 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5786 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5787 5788 if (link_state.an_complete) 5789 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5790 5791 if (link_state.partner_adv_pause) 5792 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5793 5794 if ((link_state.partner_adv_pause == 5795 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5796 (link_state.partner_adv_pause == 5797 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5798 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5799 5800 return; 5801 } 5802 5803 void 5804 qlnx_schedule_recovery(void *p_hwfn) 5805 { 5806 qlnx_host_t *ha; 5807 5808 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5809 5810 if (qlnx_vf_device(ha) != 0) { 5811 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5812 } 5813 5814 return; 5815 } 5816 5817 static int 5818 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5819 { 5820 int rc, i; 5821 5822 for (i = 0; i < cdev->num_hwfns; i++) { 5823 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5824 p_hwfn->pf_params = *func_params; 5825 5826 #ifdef QLNX_ENABLE_IWARP 5827 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5828 p_hwfn->using_ll2 = true; 5829 } 5830 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5831 5832 } 5833 5834 rc = ecore_resc_alloc(cdev); 5835 if (rc) 5836 goto qlnx_nic_setup_exit; 5837 5838 ecore_resc_setup(cdev); 5839 5840 qlnx_nic_setup_exit: 5841 5842 return rc; 5843 } 5844 5845 static int 5846 qlnx_nic_start(struct ecore_dev *cdev) 5847 { 5848 int rc; 5849 struct ecore_hw_init_params params; 5850 5851 bzero(¶ms, sizeof (struct ecore_hw_init_params)); 5852 5853 params.p_tunn = NULL; 5854 params.b_hw_start = true; 5855 params.int_mode = cdev->int_mode; 5856 params.allow_npar_tx_switch = true; 5857 params.bin_fw_data = NULL; 5858 5859 rc = ecore_hw_init(cdev, ¶ms); 5860 if (rc) { 5861 ecore_resc_free(cdev); 5862 return rc; 5863 } 5864 5865 return 0; 5866 } 5867 5868 static int 5869 qlnx_slowpath_start(qlnx_host_t *ha) 5870 { 5871 struct ecore_dev *cdev; 5872 struct ecore_pf_params pf_params; 5873 int rc; 5874 5875 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5876 pf_params.eth_pf_params.num_cons = 5877 (ha->num_rss) * (ha->num_tc + 1); 5878 5879 #ifdef QLNX_ENABLE_IWARP 5880 if (qlnx_vf_device(ha) != 0) { 5881 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5882 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5883 pf_params.rdma_pf_params.num_qps = 1024; 5884 pf_params.rdma_pf_params.num_srqs = 1024; 5885 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5886 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5887 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5888 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5889 pf_params.rdma_pf_params.num_qps = 8192; 5890 pf_params.rdma_pf_params.num_srqs = 8192; 5891 //pf_params.rdma_pf_params.min_dpis = 0; 5892 pf_params.rdma_pf_params.min_dpis = 8; 5893 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5894 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5895 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5896 } 5897 } 5898 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5899 5900 cdev = &ha->cdev; 5901 5902 rc = qlnx_nic_setup(cdev, &pf_params); 5903 if (rc) 5904 
goto qlnx_slowpath_start_exit; 5905 5906 cdev->int_mode = ECORE_INT_MODE_MSIX; 5907 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5908 5909 #ifdef QLNX_MAX_COALESCE 5910 cdev->rx_coalesce_usecs = 255; 5911 cdev->tx_coalesce_usecs = 255; 5912 #endif 5913 5914 rc = qlnx_nic_start(cdev); 5915 5916 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5917 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5918 5919 #ifdef QLNX_USER_LLDP 5920 (void)qlnx_set_lldp_tlvx(ha, NULL); 5921 #endif /* #ifdef QLNX_USER_LLDP */ 5922 5923 qlnx_slowpath_start_exit: 5924 5925 return (rc); 5926 } 5927 5928 static int 5929 qlnx_slowpath_stop(qlnx_host_t *ha) 5930 { 5931 struct ecore_dev *cdev; 5932 device_t dev = ha->pci_dev; 5933 int i; 5934 5935 cdev = &ha->cdev; 5936 5937 ecore_hw_stop(cdev); 5938 5939 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5940 5941 if (ha->sp_handle[i]) 5942 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5943 ha->sp_handle[i]); 5944 5945 ha->sp_handle[i] = NULL; 5946 5947 if (ha->sp_irq[i]) 5948 (void) bus_release_resource(dev, SYS_RES_IRQ, 5949 ha->sp_irq_rid[i], ha->sp_irq[i]); 5950 ha->sp_irq[i] = NULL; 5951 } 5952 5953 ecore_resc_free(cdev); 5954 5955 return 0; 5956 } 5957 5958 static void 5959 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5960 char ver_str[VER_SIZE]) 5961 { 5962 int i; 5963 5964 memcpy(cdev->name, name, NAME_SIZE); 5965 5966 for_each_hwfn(cdev, i) { 5967 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5968 } 5969 5970 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5971 5972 return ; 5973 } 5974 5975 void 5976 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5977 { 5978 enum ecore_mcp_protocol_type type; 5979 union ecore_mcp_protocol_stats *stats; 5980 struct ecore_eth_stats eth_stats; 5981 qlnx_host_t *ha; 5982 5983 ha = cdev; 5984 stats = proto_stats; 5985 type = proto_type; 5986 5987 switch (type) { 5988 5989 case ECORE_MCP_LAN_STATS: 5990 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats); 5991 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5992 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5993 stats->lan_stats.fcs_err = -1; 5994 break; 5995 5996 default: 5997 ha->err_get_proto_invalid_type++; 5998 5999 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 6000 break; 6001 } 6002 return; 6003 } 6004 6005 static int 6006 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 6007 { 6008 struct ecore_hwfn *p_hwfn; 6009 struct ecore_ptt *p_ptt; 6010 6011 p_hwfn = &ha->cdev.hwfns[0]; 6012 p_ptt = ecore_ptt_acquire(p_hwfn); 6013 6014 if (p_ptt == NULL) { 6015 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 6016 return (-1); 6017 } 6018 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 6019 6020 ecore_ptt_release(p_hwfn, p_ptt); 6021 6022 return (0); 6023 } 6024 6025 static int 6026 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 6027 { 6028 struct ecore_hwfn *p_hwfn; 6029 struct ecore_ptt *p_ptt; 6030 6031 p_hwfn = &ha->cdev.hwfns[0]; 6032 p_ptt = ecore_ptt_acquire(p_hwfn); 6033 6034 if (p_ptt == NULL) { 6035 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 6036 return (-1); 6037 } 6038 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 6039 6040 ecore_ptt_release(p_hwfn, p_ptt); 6041 6042 return (0); 6043 } 6044 6045 static int 6046 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 6047 { 6048 struct ecore_dev *cdev; 6049 6050 cdev = &ha->cdev; 6051 6052 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 6053 bzero(&ha->rxq_array[0], (sizeof(struct 
qlnx_rx_queue) * QLNX_MAX_RSS)); 6054 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 6055 6056 return 0; 6057 } 6058 6059 static void 6060 qlnx_init_fp(qlnx_host_t *ha) 6061 { 6062 int rss_id, txq_array_index, tc; 6063 6064 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6065 6066 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6067 6068 fp->rss_id = rss_id; 6069 fp->edev = ha; 6070 fp->sb_info = &ha->sb_array[rss_id]; 6071 fp->rxq = &ha->rxq_array[rss_id]; 6072 fp->rxq->rxq_id = rss_id; 6073 6074 for (tc = 0; tc < ha->num_tc; tc++) { 6075 txq_array_index = tc * ha->num_rss + rss_id; 6076 fp->txq[tc] = &ha->txq_array[txq_array_index]; 6077 fp->txq[tc]->index = txq_array_index; 6078 } 6079 6080 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 6081 rss_id); 6082 6083 fp->tx_ring_full = 0; 6084 6085 /* reset all the statistics counters */ 6086 6087 fp->tx_pkts_processed = 0; 6088 fp->tx_pkts_freed = 0; 6089 fp->tx_pkts_transmitted = 0; 6090 fp->tx_pkts_completed = 0; 6091 6092 #ifdef QLNX_TRACE_PERF_DATA 6093 fp->tx_pkts_trans_ctx = 0; 6094 fp->tx_pkts_compl_ctx = 0; 6095 fp->tx_pkts_trans_fp = 0; 6096 fp->tx_pkts_compl_fp = 0; 6097 fp->tx_pkts_compl_intr = 0; 6098 #endif 6099 fp->tx_lso_wnd_min_len = 0; 6100 fp->tx_defrag = 0; 6101 fp->tx_nsegs_gt_elem_left = 0; 6102 fp->tx_tso_max_nsegs = 0; 6103 fp->tx_tso_min_nsegs = 0; 6104 fp->err_tx_nsegs_gt_elem_left = 0; 6105 fp->err_tx_dmamap_create = 0; 6106 fp->err_tx_defrag_dmamap_load = 0; 6107 fp->err_tx_non_tso_max_seg = 0; 6108 fp->err_tx_dmamap_load = 0; 6109 fp->err_tx_defrag = 0; 6110 fp->err_tx_free_pkt_null = 0; 6111 fp->err_tx_cons_idx_conflict = 0; 6112 6113 fp->rx_pkts = 0; 6114 fp->err_m_getcl = 0; 6115 fp->err_m_getjcl = 0; 6116 } 6117 return; 6118 } 6119 6120 void 6121 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 6122 { 6123 struct ecore_dev *cdev; 6124 6125 cdev = &ha->cdev; 6126 6127 if (sb_info->sb_virt) { 6128 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 6129 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 6130 sb_info->sb_virt = NULL; 6131 } 6132 } 6133 6134 static int 6135 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 6136 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 6137 { 6138 struct ecore_hwfn *p_hwfn; 6139 int hwfn_index, rc; 6140 u16 rel_sb_id; 6141 6142 hwfn_index = sb_id % cdev->num_hwfns; 6143 p_hwfn = &cdev->hwfns[hwfn_index]; 6144 rel_sb_id = sb_id / cdev->num_hwfns; 6145 6146 QL_DPRINT2(((qlnx_host_t *)cdev), 6147 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 6148 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 6149 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 6150 sb_virt_addr, (void *)sb_phy_addr); 6151 6152 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 6153 sb_virt_addr, sb_phy_addr, rel_sb_id); 6154 6155 return rc; 6156 } 6157 6158 /* This function allocates fast-path status block memory */ 6159 int 6160 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 6161 { 6162 struct status_block_e4 *sb_virt; 6163 bus_addr_t sb_phys; 6164 int rc; 6165 uint32_t size; 6166 struct ecore_dev *cdev; 6167 6168 cdev = &ha->cdev; 6169 6170 size = sizeof(*sb_virt); 6171 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 6172 6173 if (!sb_virt) { 6174 QL_DPRINT1(ha, "Status block allocation failed\n"); 6175 return -ENOMEM; 6176 } 6177 6178 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 6179 if (rc) { 6180 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, 
sb_phys, size); 6181 } 6182 6183 return rc; 6184 } 6185 6186 static void 6187 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6188 { 6189 int i; 6190 struct sw_rx_data *rx_buf; 6191 6192 for (i = 0; i < rxq->num_rx_buffers; i++) { 6193 6194 rx_buf = &rxq->sw_rx_ring[i]; 6195 6196 if (rx_buf->data != NULL) { 6197 if (rx_buf->map != NULL) { 6198 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6199 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6200 rx_buf->map = NULL; 6201 } 6202 m_freem(rx_buf->data); 6203 rx_buf->data = NULL; 6204 } 6205 } 6206 return; 6207 } 6208 6209 static void 6210 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6211 { 6212 struct ecore_dev *cdev; 6213 int i; 6214 6215 cdev = &ha->cdev; 6216 6217 qlnx_free_rx_buffers(ha, rxq); 6218 6219 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6220 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 6221 if (rxq->tpa_info[i].mpf != NULL) 6222 m_freem(rxq->tpa_info[i].mpf); 6223 } 6224 6225 bzero((void *)&rxq->sw_rx_ring[0], 6226 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6227 6228 /* Free the real RQ ring used by FW */ 6229 if (rxq->rx_bd_ring.p_virt_addr) { 6230 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6231 rxq->rx_bd_ring.p_virt_addr = NULL; 6232 } 6233 6234 /* Free the real completion ring used by FW */ 6235 if (rxq->rx_comp_ring.p_virt_addr && 6236 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6237 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6238 rxq->rx_comp_ring.p_virt_addr = NULL; 6239 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6240 } 6241 6242 #ifdef QLNX_SOFT_LRO 6243 { 6244 struct lro_ctrl *lro; 6245 6246 lro = &rxq->lro; 6247 tcp_lro_free(lro); 6248 } 6249 #endif /* #ifdef QLNX_SOFT_LRO */ 6250 6251 return; 6252 } 6253 6254 static int 6255 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6256 { 6257 register struct mbuf *mp; 6258 uint16_t rx_buf_size; 6259 struct sw_rx_data *sw_rx_data; 6260 struct eth_rx_bd *rx_bd; 6261 dma_addr_t dma_addr; 6262 bus_dmamap_t map; 6263 bus_dma_segment_t segs[1]; 6264 int nsegs; 6265 int ret; 6266 struct ecore_dev *cdev; 6267 6268 cdev = &ha->cdev; 6269 6270 rx_buf_size = rxq->rx_buf_size; 6271 6272 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6273 6274 if (mp == NULL) { 6275 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6276 return -ENOMEM; 6277 } 6278 6279 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6280 6281 map = (bus_dmamap_t)0; 6282 6283 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6284 BUS_DMA_NOWAIT); 6285 dma_addr = segs[0].ds_addr; 6286 6287 if (ret || !dma_addr || (nsegs != 1)) { 6288 m_freem(mp); 6289 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6290 ret, (long long unsigned int)dma_addr, nsegs); 6291 return -ENOMEM; 6292 } 6293 6294 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6295 sw_rx_data->data = mp; 6296 sw_rx_data->dma_addr = dma_addr; 6297 sw_rx_data->map = map; 6298 6299 /* Advance PROD and get BD pointer */ 6300 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6301 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6302 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6303 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6304 6305 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6306 6307 return 0; 6308 } 6309 6310 static int 6311 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6312 struct qlnx_agg_info *tpa) 6313 { 6314 struct mbuf *mp; 6315 dma_addr_t dma_addr; 6316 bus_dmamap_t map; 6317 bus_dma_segment_t segs[1]; 6318 int nsegs; 6319 int 
ret; 6320 struct sw_rx_data *rx_buf; 6321 6322 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6323 6324 if (mp == NULL) { 6325 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6326 return -ENOMEM; 6327 } 6328 6329 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6330 6331 map = (bus_dmamap_t)0; 6332 6333 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6334 BUS_DMA_NOWAIT); 6335 dma_addr = segs[0].ds_addr; 6336 6337 if (ret || !dma_addr || (nsegs != 1)) { 6338 m_freem(mp); 6339 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6340 ret, (long long unsigned int)dma_addr, nsegs); 6341 return -ENOMEM; 6342 } 6343 6344 rx_buf = &tpa->rx_buf; 6345 6346 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6347 6348 rx_buf->data = mp; 6349 rx_buf->dma_addr = dma_addr; 6350 rx_buf->map = map; 6351 6352 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6353 6354 return (0); 6355 } 6356 6357 static void 6358 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6359 { 6360 struct sw_rx_data *rx_buf; 6361 6362 rx_buf = &tpa->rx_buf; 6363 6364 if (rx_buf->data != NULL) { 6365 if (rx_buf->map != NULL) { 6366 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6367 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6368 rx_buf->map = NULL; 6369 } 6370 m_freem(rx_buf->data); 6371 rx_buf->data = NULL; 6372 } 6373 return; 6374 } 6375 6376 /* This function allocates all memory needed per Rx queue */ 6377 static int 6378 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6379 { 6380 int i, rc, num_allocated; 6381 struct ifnet *ifp; 6382 struct ecore_dev *cdev; 6383 6384 cdev = &ha->cdev; 6385 ifp = ha->ifp; 6386 6387 rxq->num_rx_buffers = RX_RING_SIZE; 6388 6389 rxq->rx_buf_size = ha->rx_buf_size; 6390 6391 /* Allocate the parallel driver ring for Rx buffers */ 6392 bzero((void *)&rxq->sw_rx_ring[0], 6393 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6394 6395 /* Allocate FW Rx ring */ 6396 6397 rc = ecore_chain_alloc(cdev, 6398 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6399 ECORE_CHAIN_MODE_NEXT_PTR, 6400 ECORE_CHAIN_CNT_TYPE_U16, 6401 RX_RING_SIZE, 6402 sizeof(struct eth_rx_bd), 6403 &rxq->rx_bd_ring, NULL); 6404 6405 if (rc) 6406 goto err; 6407 6408 /* Allocate FW completion ring */ 6409 rc = ecore_chain_alloc(cdev, 6410 ECORE_CHAIN_USE_TO_CONSUME, 6411 ECORE_CHAIN_MODE_PBL, 6412 ECORE_CHAIN_CNT_TYPE_U16, 6413 RX_RING_SIZE, 6414 sizeof(union eth_rx_cqe), 6415 &rxq->rx_comp_ring, NULL); 6416 6417 if (rc) 6418 goto err; 6419 6420 /* Allocate buffers for the Rx ring */ 6421 6422 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6423 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6424 &rxq->tpa_info[i]); 6425 if (rc) 6426 break; 6427 6428 } 6429 6430 for (i = 0; i < rxq->num_rx_buffers; i++) { 6431 rc = qlnx_alloc_rx_buffer(ha, rxq); 6432 if (rc) 6433 break; 6434 } 6435 num_allocated = i; 6436 if (!num_allocated) { 6437 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6438 goto err; 6439 } else if (num_allocated < rxq->num_rx_buffers) { 6440 QL_DPRINT1(ha, "Allocated less buffers than" 6441 " desired (%d allocated)\n", num_allocated); 6442 } 6443 6444 #ifdef QLNX_SOFT_LRO 6445 6446 { 6447 struct lro_ctrl *lro; 6448 6449 lro = &rxq->lro; 6450 6451 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 6452 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6453 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6454 rxq->rxq_id); 6455 goto err; 6456 } 6457 #else 6458 if (tcp_lro_init(lro)) { 6459 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6460 rxq->rxq_id); 6461 goto err; 
6462 } 6463 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 6464 6465 lro->ifp = ha->ifp; 6466 } 6467 #endif /* #ifdef QLNX_SOFT_LRO */ 6468 return 0; 6469 6470 err: 6471 qlnx_free_mem_rxq(ha, rxq); 6472 return -ENOMEM; 6473 } 6474 6475 6476 static void 6477 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6478 struct qlnx_tx_queue *txq) 6479 { 6480 struct ecore_dev *cdev; 6481 6482 cdev = &ha->cdev; 6483 6484 bzero((void *)&txq->sw_tx_ring[0], 6485 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6486 6487 /* Free the real RQ ring used by FW */ 6488 if (txq->tx_pbl.p_virt_addr) { 6489 ecore_chain_free(cdev, &txq->tx_pbl); 6490 txq->tx_pbl.p_virt_addr = NULL; 6491 } 6492 return; 6493 } 6494 6495 /* This function allocates all memory needed per Tx queue */ 6496 static int 6497 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6498 struct qlnx_tx_queue *txq) 6499 { 6500 int ret = ECORE_SUCCESS; 6501 union eth_tx_bd_types *p_virt; 6502 struct ecore_dev *cdev; 6503 6504 cdev = &ha->cdev; 6505 6506 bzero((void *)&txq->sw_tx_ring[0], 6507 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6508 6509 /* Allocate the real Tx ring to be used by FW */ 6510 ret = ecore_chain_alloc(cdev, 6511 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6512 ECORE_CHAIN_MODE_PBL, 6513 ECORE_CHAIN_CNT_TYPE_U16, 6514 TX_RING_SIZE, 6515 sizeof(*p_virt), 6516 &txq->tx_pbl, NULL); 6517 6518 if (ret != ECORE_SUCCESS) { 6519 goto err; 6520 } 6521 6522 txq->num_tx_buffers = TX_RING_SIZE; 6523 6524 return 0; 6525 6526 err: 6527 qlnx_free_mem_txq(ha, fp, txq); 6528 return -ENOMEM; 6529 } 6530 6531 static void 6532 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6533 { 6534 struct mbuf *mp; 6535 struct ifnet *ifp = ha->ifp; 6536 6537 if (mtx_initialized(&fp->tx_mtx)) { 6538 6539 if (fp->tx_br != NULL) { 6540 6541 mtx_lock(&fp->tx_mtx); 6542 6543 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6544 fp->tx_pkts_freed++; 6545 m_freem(mp); 6546 } 6547 6548 mtx_unlock(&fp->tx_mtx); 6549 6550 buf_ring_free(fp->tx_br, M_DEVBUF); 6551 fp->tx_br = NULL; 6552 } 6553 mtx_destroy(&fp->tx_mtx); 6554 } 6555 return; 6556 } 6557 6558 static void 6559 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6560 { 6561 int tc; 6562 6563 qlnx_free_mem_sb(ha, fp->sb_info); 6564 6565 qlnx_free_mem_rxq(ha, fp->rxq); 6566 6567 for (tc = 0; tc < ha->num_tc; tc++) 6568 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6569 6570 return; 6571 } 6572 6573 static int 6574 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6575 { 6576 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6577 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6578 6579 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6580 6581 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6582 M_NOWAIT, &fp->tx_mtx); 6583 if (fp->tx_br == NULL) { 6584 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6585 ha->dev_unit, fp->rss_id); 6586 return -ENOMEM; 6587 } 6588 return 0; 6589 } 6590 6591 static int 6592 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6593 { 6594 int rc, tc; 6595 6596 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6597 if (rc) 6598 goto err; 6599 6600 if (ha->rx_jumbo_buf_eq_mtu) { 6601 if (ha->max_frame_size <= MCLBYTES) 6602 ha->rx_buf_size = MCLBYTES; 6603 else if (ha->max_frame_size <= MJUMPAGESIZE) 6604 ha->rx_buf_size = MJUMPAGESIZE; 6605 else if (ha->max_frame_size <= MJUM9BYTES) 6606 ha->rx_buf_size = MJUM9BYTES; 6607 else if (ha->max_frame_size <= MJUM16BYTES) 6608 
ha->rx_buf_size = MJUM16BYTES; 6609 } else { 6610 if (ha->max_frame_size <= MCLBYTES) 6611 ha->rx_buf_size = MCLBYTES; 6612 else 6613 ha->rx_buf_size = MJUMPAGESIZE; 6614 } 6615 6616 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6617 if (rc) 6618 goto err; 6619 6620 for (tc = 0; tc < ha->num_tc; tc++) { 6621 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6622 if (rc) 6623 goto err; 6624 } 6625 6626 return 0; 6627 6628 err: 6629 qlnx_free_mem_fp(ha, fp); 6630 return -ENOMEM; 6631 } 6632 6633 static void 6634 qlnx_free_mem_load(qlnx_host_t *ha) 6635 { 6636 int i; 6637 struct ecore_dev *cdev; 6638 6639 cdev = &ha->cdev; 6640 6641 for (i = 0; i < ha->num_rss; i++) { 6642 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6643 6644 qlnx_free_mem_fp(ha, fp); 6645 } 6646 return; 6647 } 6648 6649 static int 6650 qlnx_alloc_mem_load(qlnx_host_t *ha) 6651 { 6652 int rc = 0, rss_id; 6653 6654 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6655 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6656 6657 rc = qlnx_alloc_mem_fp(ha, fp); 6658 if (rc) 6659 break; 6660 } 6661 return (rc); 6662 } 6663 6664 static int 6665 qlnx_start_vport(struct ecore_dev *cdev, 6666 u8 vport_id, 6667 u16 mtu, 6668 u8 drop_ttl0_flg, 6669 u8 inner_vlan_removal_en_flg, 6670 u8 tx_switching, 6671 u8 hw_lro_enable) 6672 { 6673 int rc, i; 6674 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6675 qlnx_host_t *ha; 6676 6677 ha = (qlnx_host_t *)cdev; 6678 6679 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6680 vport_start_params.tx_switching = 0; 6681 vport_start_params.handle_ptp_pkts = 0; 6682 vport_start_params.only_untagged = 0; 6683 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6684 6685 vport_start_params.tpa_mode = 6686 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6687 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6688 6689 vport_start_params.vport_id = vport_id; 6690 vport_start_params.mtu = mtu; 6691 6692 6693 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6694 6695 for_each_hwfn(cdev, i) { 6696 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6697 6698 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6699 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6700 6701 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6702 6703 if (rc) { 6704 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6705 " with MTU %d\n" , vport_id, mtu); 6706 return -ENOMEM; 6707 } 6708 6709 ecore_hw_start_fastpath(p_hwfn); 6710 6711 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6712 vport_id, mtu); 6713 } 6714 return 0; 6715 } 6716 6717 6718 static int 6719 qlnx_update_vport(struct ecore_dev *cdev, 6720 struct qlnx_update_vport_params *params) 6721 { 6722 struct ecore_sp_vport_update_params sp_params; 6723 int rc, i, j, fp_index; 6724 struct ecore_hwfn *p_hwfn; 6725 struct ecore_rss_params *rss; 6726 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6727 struct qlnx_fastpath *fp; 6728 6729 memset(&sp_params, 0, sizeof(sp_params)); 6730 /* Translate protocol params into sp params */ 6731 sp_params.vport_id = params->vport_id; 6732 6733 sp_params.update_vport_active_rx_flg = 6734 params->update_vport_active_rx_flg; 6735 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6736 6737 sp_params.update_vport_active_tx_flg = 6738 params->update_vport_active_tx_flg; 6739 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6740 6741 sp_params.update_inner_vlan_removal_flg = 6742 params->update_inner_vlan_removal_flg; 6743 
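	/*
	 * Illustrative note on the RSS handling further below (values
	 * assumed): in CMT mode (num_hwfns > 1) each engine only owns every
	 * num_hwfns-th fastpath, so indirection entry j of engine i is
	 * re-pointed at fastpath ((num_hwfns * j) + i) % num_rss.  With
	 * 2 hwfns and 4 RSS queues, engine 0 cycles through fastpaths
	 * 0,2,0,2,... while engine 1 cycles through 1,3,1,3,...
	 */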
sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6744 6745 sp_params.sge_tpa_params = params->sge_tpa_params; 6746 6747 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6748 * We need to re-fix the rss values per engine for CMT. 6749 */ 6750 if (params->rss_params->update_rss_config) 6751 sp_params.rss_params = params->rss_params; 6752 else 6753 sp_params.rss_params = NULL; 6754 6755 for_each_hwfn(cdev, i) { 6756 6757 p_hwfn = &cdev->hwfns[i]; 6758 6759 if ((cdev->num_hwfns > 1) && 6760 params->rss_params->update_rss_config && 6761 params->rss_params->rss_enable) { 6762 6763 rss = params->rss_params; 6764 6765 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6766 6767 fp_index = ((cdev->num_hwfns * j) + i) % 6768 ha->num_rss; 6769 6770 fp = &ha->fp_array[fp_index]; 6771 rss->rss_ind_table[j] = fp->rxq->handle; 6772 } 6773 6774 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6775 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6776 rss->rss_ind_table[j], 6777 rss->rss_ind_table[j+1], 6778 rss->rss_ind_table[j+2], 6779 rss->rss_ind_table[j+3], 6780 rss->rss_ind_table[j+4], 6781 rss->rss_ind_table[j+5], 6782 rss->rss_ind_table[j+6], 6783 rss->rss_ind_table[j+7]); 6784 j += 8; 6785 } 6786 } 6787 6788 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6789 6790 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6791 6792 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6793 ECORE_SPQ_MODE_EBLOCK, NULL); 6794 if (rc) { 6795 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6796 return rc; 6797 } 6798 6799 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6800 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6801 params->vport_id, params->vport_active_tx_flg, 6802 params->vport_active_rx_flg, 6803 params->update_vport_active_tx_flg, 6804 params->update_vport_active_rx_flg); 6805 } 6806 6807 return 0; 6808 } 6809 6810 static void 6811 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6812 { 6813 struct eth_rx_bd *rx_bd_cons = 6814 ecore_chain_consume(&rxq->rx_bd_ring); 6815 struct eth_rx_bd *rx_bd_prod = 6816 ecore_chain_produce(&rxq->rx_bd_ring); 6817 struct sw_rx_data *sw_rx_data_cons = 6818 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6819 struct sw_rx_data *sw_rx_data_prod = 6820 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6821 6822 sw_rx_data_prod->data = sw_rx_data_cons->data; 6823 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6824 6825 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6826 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6827 6828 return; 6829 } 6830 6831 static void 6832 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6833 { 6834 6835 uint16_t bd_prod; 6836 uint16_t cqe_prod; 6837 union { 6838 struct eth_rx_prod_data rx_prod_data; 6839 uint32_t data32; 6840 } rx_prods; 6841 6842 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6843 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6844 6845 /* Update producers */ 6846 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6847 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6848 6849 /* Make sure that the BD and SGE data is updated before updating the 6850 * producers since FW might read the BD/SGE right after the producer 6851 * is updated. 6852 */ 6853 wmb(); 6854 6855 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6856 sizeof(rx_prods), &rx_prods.data32); 6857 6858 /* mmiowb is needed to synchronize doorbell writes from more than one 6859 * processor. 
It guarantees that the write arrives to the device before 6860 * the napi lock is released and another qlnx_poll is called (possibly 6861 * on another CPU). Without this barrier, the next doorbell can bypass 6862 * this doorbell. This is applicable to IA64/Altix systems. 6863 */ 6864 wmb(); 6865 6866 return; 6867 } 6868 6869 static uint32_t qlnx_hash_key[] = { 6870 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6871 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6872 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6873 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6874 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6875 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6876 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6877 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6878 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6879 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6880 6881 static int 6882 qlnx_start_queues(qlnx_host_t *ha) 6883 { 6884 int rc, tc, i, vport_id = 0, 6885 drop_ttl0_flg = 1, vlan_removal_en = 1, 6886 tx_switching = 0, hw_lro_enable = 0; 6887 struct ecore_dev *cdev = &ha->cdev; 6888 struct ecore_rss_params *rss_params = &ha->rss_params; 6889 struct qlnx_update_vport_params vport_update_params; 6890 struct ifnet *ifp; 6891 struct ecore_hwfn *p_hwfn; 6892 struct ecore_sge_tpa_params tpa_params; 6893 struct ecore_queue_start_common_params qparams; 6894 struct qlnx_fastpath *fp; 6895 6896 ifp = ha->ifp; 6897 6898 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6899 6900 if (!ha->num_rss) { 6901 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6902 " are no Rx queues\n"); 6903 return -EINVAL; 6904 } 6905 6906 #ifndef QLNX_SOFT_LRO 6907 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6908 #endif /* #ifndef QLNX_SOFT_LRO */ 6909 6910 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6911 vlan_removal_en, tx_switching, hw_lro_enable); 6912 6913 if (rc) { 6914 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6915 return rc; 6916 } 6917 6918 QL_DPRINT2(ha, "Start vport ramrod passed, " 6919 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6920 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 6921 6922 for_each_rss(i) { 6923 struct ecore_rxq_start_ret_params rx_ret_params; 6924 struct ecore_txq_start_ret_params tx_ret_params; 6925 6926 fp = &ha->fp_array[i]; 6927 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6928 6929 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6930 bzero(&rx_ret_params, 6931 sizeof (struct ecore_rxq_start_ret_params)); 6932 6933 qparams.queue_id = i ; 6934 qparams.vport_id = vport_id; 6935 qparams.stats_id = vport_id; 6936 qparams.p_sb = fp->sb_info; 6937 qparams.sb_idx = RX_PI; 6938 6939 6940 rc = ecore_eth_rx_queue_start(p_hwfn, 6941 p_hwfn->hw_info.opaque_fid, 6942 &qparams, 6943 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6944 /* bd_chain_phys_addr */ 6945 fp->rxq->rx_bd_ring.p_phys_addr, 6946 /* cqe_pbl_addr */ 6947 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6948 /* cqe_pbl_size */ 6949 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6950 &rx_ret_params); 6951 6952 if (rc) { 6953 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6954 return rc; 6955 } 6956 6957 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6958 fp->rxq->handle = rx_ret_params.p_handle; 6959 fp->rxq->hw_cons_ptr = 6960 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6961 6962 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6963 6964 for (tc = 0; tc < ha->num_tc; tc++) { 6965 struct qlnx_tx_queue *txq = fp->txq[tc]; 6966 6967 bzero(&qparams, 6968 
sizeof(struct ecore_queue_start_common_params)); 6969 bzero(&tx_ret_params, 6970 sizeof (struct ecore_txq_start_ret_params)); 6971 6972 qparams.queue_id = txq->index / cdev->num_hwfns ; 6973 qparams.vport_id = vport_id; 6974 qparams.stats_id = vport_id; 6975 qparams.p_sb = fp->sb_info; 6976 qparams.sb_idx = TX_PI(tc); 6977 6978 rc = ecore_eth_tx_queue_start(p_hwfn, 6979 p_hwfn->hw_info.opaque_fid, 6980 &qparams, tc, 6981 /* bd_chain_phys_addr */ 6982 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6983 ecore_chain_get_page_cnt(&txq->tx_pbl), 6984 &tx_ret_params); 6985 6986 if (rc) { 6987 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6988 txq->index, rc); 6989 return rc; 6990 } 6991 6992 txq->doorbell_addr = tx_ret_params.p_doorbell; 6993 txq->handle = tx_ret_params.p_handle; 6994 6995 txq->hw_cons_ptr = 6996 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6997 SET_FIELD(txq->tx_db.data.params, 6998 ETH_DB_DATA_DEST, DB_DEST_XCM); 6999 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 7000 DB_AGG_CMD_SET); 7001 SET_FIELD(txq->tx_db.data.params, 7002 ETH_DB_DATA_AGG_VAL_SEL, 7003 DQ_XCM_ETH_TX_BD_PROD_CMD); 7004 7005 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 7006 } 7007 } 7008 7009 /* Fill struct with RSS params */ 7010 if (ha->num_rss > 1) { 7011 7012 rss_params->update_rss_config = 1; 7013 rss_params->rss_enable = 1; 7014 rss_params->update_rss_capabilities = 1; 7015 rss_params->update_rss_ind_table = 1; 7016 rss_params->update_rss_key = 1; 7017 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 7018 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 7019 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 7020 7021 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 7022 fp = &ha->fp_array[(i % ha->num_rss)]; 7023 rss_params->rss_ind_table[i] = fp->rxq->handle; 7024 } 7025 7026 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 7027 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 7028 7029 } else { 7030 memset(rss_params, 0, sizeof(*rss_params)); 7031 } 7032 7033 7034 /* Prepare and send the vport enable */ 7035 memset(&vport_update_params, 0, sizeof(vport_update_params)); 7036 vport_update_params.vport_id = vport_id; 7037 vport_update_params.update_vport_active_tx_flg = 1; 7038 vport_update_params.vport_active_tx_flg = 1; 7039 vport_update_params.update_vport_active_rx_flg = 1; 7040 vport_update_params.vport_active_rx_flg = 1; 7041 vport_update_params.rss_params = rss_params; 7042 vport_update_params.update_inner_vlan_removal_flg = 1; 7043 vport_update_params.inner_vlan_removal_flg = 1; 7044 7045 if (hw_lro_enable) { 7046 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 7047 7048 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 7049 7050 tpa_params.update_tpa_en_flg = 1; 7051 tpa_params.tpa_ipv4_en_flg = 1; 7052 tpa_params.tpa_ipv6_en_flg = 1; 7053 7054 tpa_params.update_tpa_param_flg = 1; 7055 tpa_params.tpa_pkt_split_flg = 0; 7056 tpa_params.tpa_hdr_data_split_flg = 0; 7057 tpa_params.tpa_gro_consistent_flg = 0; 7058 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 7059 tpa_params.tpa_max_size = (uint16_t)(-1); 7060 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 7061 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 7062 7063 vport_update_params.sge_tpa_params = &tpa_params; 7064 } 7065 7066 rc = qlnx_update_vport(cdev, &vport_update_params); 7067 if (rc) { 7068 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 7069 return rc; 7070 } 7071 7072 return 0; 7073 } 7074 7075 static int 7076 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 7077 struct qlnx_tx_queue *txq) 
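/*
 * Drains any Tx completions the firmware still owns for this queue: the
 * loop below compares the status-block consumer (*txq->hw_cons_ptr)
 * against the local chain consumer index, reaps completed BDs under the
 * fastpath Tx lock via qlnx_tx_int(), and waits roughly 2ms between
 * polls until the two indices match.
 */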
7078 { 7079 uint16_t hw_bd_cons; 7080 uint16_t ecore_cons_idx; 7081 7082 QL_DPRINT2(ha, "enter\n"); 7083 7084 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 7085 7086 while (hw_bd_cons != 7087 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 7088 7089 mtx_lock(&fp->tx_mtx); 7090 7091 (void)qlnx_tx_int(ha, fp, txq); 7092 7093 mtx_unlock(&fp->tx_mtx); 7094 7095 qlnx_mdelay(__func__, 2); 7096 7097 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 7098 } 7099 7100 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 7101 7102 return 0; 7103 } 7104 7105 static int 7106 qlnx_stop_queues(qlnx_host_t *ha) 7107 { 7108 struct qlnx_update_vport_params vport_update_params; 7109 struct ecore_dev *cdev; 7110 struct qlnx_fastpath *fp; 7111 int rc, tc, i; 7112 7113 cdev = &ha->cdev; 7114 7115 /* Disable the vport */ 7116 7117 memset(&vport_update_params, 0, sizeof(vport_update_params)); 7118 7119 vport_update_params.vport_id = 0; 7120 vport_update_params.update_vport_active_tx_flg = 1; 7121 vport_update_params.vport_active_tx_flg = 0; 7122 vport_update_params.update_vport_active_rx_flg = 1; 7123 vport_update_params.vport_active_rx_flg = 0; 7124 vport_update_params.rss_params = &ha->rss_params; 7125 vport_update_params.rss_params->update_rss_config = 0; 7126 vport_update_params.rss_params->rss_enable = 0; 7127 vport_update_params.update_inner_vlan_removal_flg = 0; 7128 vport_update_params.inner_vlan_removal_flg = 0; 7129 7130 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 7131 7132 rc = qlnx_update_vport(cdev, &vport_update_params); 7133 if (rc) { 7134 QL_DPRINT1(ha, "Failed to update vport\n"); 7135 return rc; 7136 } 7137 7138 /* Flush Tx queues. If needed, request drain from MCP */ 7139 for_each_rss(i) { 7140 fp = &ha->fp_array[i]; 7141 7142 for (tc = 0; tc < ha->num_tc; tc++) { 7143 struct qlnx_tx_queue *txq = fp->txq[tc]; 7144 7145 rc = qlnx_drain_txq(ha, fp, txq); 7146 if (rc) 7147 return rc; 7148 } 7149 } 7150 7151 /* Stop all Queues in reverse order*/ 7152 for (i = ha->num_rss - 1; i >= 0; i--) { 7153 7154 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 7155 7156 fp = &ha->fp_array[i]; 7157 7158 /* Stop the Tx Queue(s)*/ 7159 for (tc = 0; tc < ha->num_tc; tc++) { 7160 int tx_queue_id; 7161 7162 tx_queue_id = tc * ha->num_rss + i; 7163 rc = ecore_eth_tx_queue_stop(p_hwfn, 7164 fp->txq[tc]->handle); 7165 7166 if (rc) { 7167 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 7168 tx_queue_id); 7169 return rc; 7170 } 7171 } 7172 7173 /* Stop the Rx Queue*/ 7174 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 7175 false); 7176 if (rc) { 7177 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 7178 return rc; 7179 } 7180 } 7181 7182 /* Stop the vport */ 7183 for_each_hwfn(cdev, i) { 7184 7185 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 7186 7187 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 7188 7189 if (rc) { 7190 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 7191 return rc; 7192 } 7193 } 7194 7195 return rc; 7196 } 7197 7198 static int 7199 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 7200 enum ecore_filter_opcode opcode, 7201 unsigned char mac[ETH_ALEN]) 7202 { 7203 struct ecore_filter_ucast ucast; 7204 struct ecore_dev *cdev; 7205 int rc; 7206 7207 cdev = &ha->cdev; 7208 7209 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7210 7211 ucast.opcode = opcode; 7212 ucast.type = ECORE_FILTER_MAC; 7213 ucast.is_rx_filter = 1; 7214 ucast.vport_to_add_to = 0; 7215 memcpy(&ucast.mac[0], mac, ETH_ALEN); 7216 7217 rc = ecore_filter_ucast_cmd(cdev, &ucast, 
ECORE_SPQ_MODE_CB, NULL); 7218 7219 return (rc); 7220 } 7221 7222 static int 7223 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 7224 { 7225 struct ecore_filter_ucast ucast; 7226 struct ecore_dev *cdev; 7227 int rc; 7228 7229 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7230 7231 ucast.opcode = ECORE_FILTER_REPLACE; 7232 ucast.type = ECORE_FILTER_MAC; 7233 ucast.is_rx_filter = 1; 7234 7235 cdev = &ha->cdev; 7236 7237 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 7238 7239 return (rc); 7240 } 7241 7242 static int 7243 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 7244 { 7245 struct ecore_filter_mcast *mcast; 7246 struct ecore_dev *cdev; 7247 int rc, i; 7248 7249 cdev = &ha->cdev; 7250 7251 mcast = &ha->ecore_mcast; 7252 bzero(mcast, sizeof(struct ecore_filter_mcast)); 7253 7254 mcast->opcode = ECORE_FILTER_REMOVE; 7255 7256 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 7257 7258 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7259 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7260 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7261 7262 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7263 mcast->num_mc_addrs++; 7264 } 7265 } 7266 mcast = &ha->ecore_mcast; 7267 7268 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7269 7270 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7271 ha->nmcast = 0; 7272 7273 return (rc); 7274 } 7275 7276 static int 7277 qlnx_clean_filters(qlnx_host_t *ha) 7278 { 7279 int rc = 0; 7280 7281 /* Remove all unicast macs */ 7282 rc = qlnx_remove_all_ucast_mac(ha); 7283 if (rc) 7284 return rc; 7285 7286 /* Remove all multicast macs */ 7287 rc = qlnx_remove_all_mcast_mac(ha); 7288 if (rc) 7289 return rc; 7290 7291 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7292 7293 return (rc); 7294 } 7295 7296 static int 7297 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7298 { 7299 struct ecore_filter_accept_flags accept; 7300 int rc = 0; 7301 struct ecore_dev *cdev; 7302 7303 cdev = &ha->cdev; 7304 7305 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7306 7307 accept.update_rx_mode_config = 1; 7308 accept.rx_accept_filter = filter; 7309 7310 accept.update_tx_mode_config = 1; 7311 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7312 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7313 7314 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7315 ECORE_SPQ_MODE_CB, NULL); 7316 7317 return (rc); 7318 } 7319 7320 static int 7321 qlnx_set_rx_mode(qlnx_host_t *ha) 7322 { 7323 int rc = 0; 7324 uint8_t filter; 7325 7326 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7327 if (rc) 7328 return rc; 7329 7330 rc = qlnx_remove_all_mcast_mac(ha); 7331 if (rc) 7332 return rc; 7333 7334 filter = ECORE_ACCEPT_UCAST_MATCHED | 7335 ECORE_ACCEPT_MCAST_MATCHED | 7336 ECORE_ACCEPT_BCAST; 7337 7338 if (qlnx_vf_device(ha) == 0) { 7339 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7340 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7341 } 7342 ha->filter = filter; 7343 7344 rc = qlnx_set_rx_accept_filter(ha, filter); 7345 7346 return (rc); 7347 } 7348 7349 static int 7350 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7351 { 7352 int i, rc = 0; 7353 struct ecore_dev *cdev; 7354 struct ecore_hwfn *hwfn; 7355 struct ecore_ptt *ptt; 7356 7357 if (qlnx_vf_device(ha) == 0) 7358 return (0); 7359 7360 cdev = &ha->cdev; 7361 7362 for_each_hwfn(cdev, i) { 7363 7364 hwfn = &cdev->hwfns[i]; 7365 7366 ptt = ecore_ptt_acquire(hwfn); 7367 if (!ptt) 7368 return 
-EBUSY; 7369 7370 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7371 7372 ecore_ptt_release(hwfn, ptt); 7373 7374 if (rc) 7375 return rc; 7376 } 7377 return (rc); 7378 } 7379 7380 #if __FreeBSD_version >= 1100000 7381 static uint64_t 7382 qlnx_get_counter(if_t ifp, ift_counter cnt) 7383 { 7384 qlnx_host_t *ha; 7385 uint64_t count; 7386 7387 ha = (qlnx_host_t *)if_getsoftc(ifp); 7388 7389 switch (cnt) { 7390 7391 case IFCOUNTER_IPACKETS: 7392 count = ha->hw_stats.common.rx_ucast_pkts + 7393 ha->hw_stats.common.rx_mcast_pkts + 7394 ha->hw_stats.common.rx_bcast_pkts; 7395 break; 7396 7397 case IFCOUNTER_IERRORS: 7398 count = ha->hw_stats.common.rx_crc_errors + 7399 ha->hw_stats.common.rx_align_errors + 7400 ha->hw_stats.common.rx_oversize_packets + 7401 ha->hw_stats.common.rx_undersize_packets; 7402 break; 7403 7404 case IFCOUNTER_OPACKETS: 7405 count = ha->hw_stats.common.tx_ucast_pkts + 7406 ha->hw_stats.common.tx_mcast_pkts + 7407 ha->hw_stats.common.tx_bcast_pkts; 7408 break; 7409 7410 case IFCOUNTER_OERRORS: 7411 count = ha->hw_stats.common.tx_err_drop_pkts; 7412 break; 7413 7414 case IFCOUNTER_COLLISIONS: 7415 return (0); 7416 7417 case IFCOUNTER_IBYTES: 7418 count = ha->hw_stats.common.rx_ucast_bytes + 7419 ha->hw_stats.common.rx_mcast_bytes + 7420 ha->hw_stats.common.rx_bcast_bytes; 7421 break; 7422 7423 case IFCOUNTER_OBYTES: 7424 count = ha->hw_stats.common.tx_ucast_bytes + 7425 ha->hw_stats.common.tx_mcast_bytes + 7426 ha->hw_stats.common.tx_bcast_bytes; 7427 break; 7428 7429 case IFCOUNTER_IMCASTS: 7430 count = ha->hw_stats.common.rx_mcast_bytes; 7431 break; 7432 7433 case IFCOUNTER_OMCASTS: 7434 count = ha->hw_stats.common.tx_mcast_bytes; 7435 break; 7436 7437 case IFCOUNTER_IQDROPS: 7438 case IFCOUNTER_OQDROPS: 7439 case IFCOUNTER_NOPROTO: 7440 7441 default: 7442 return (if_get_counter_default(ifp, cnt)); 7443 } 7444 return (count); 7445 } 7446 #endif 7447 7448 7449 static void 7450 qlnx_timer(void *arg) 7451 { 7452 qlnx_host_t *ha; 7453 7454 ha = (qlnx_host_t *)arg; 7455 7456 if (ha->error_recovery) { 7457 ha->error_recovery = 0; 7458 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7459 return; 7460 } 7461 7462 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7463 7464 if (ha->storm_stats_gather) 7465 qlnx_sample_storm_stats(ha); 7466 7467 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7468 7469 return; 7470 } 7471 7472 static int 7473 qlnx_load(qlnx_host_t *ha) 7474 { 7475 int i; 7476 int rc = 0; 7477 struct ecore_dev *cdev; 7478 device_t dev; 7479 7480 cdev = &ha->cdev; 7481 dev = ha->pci_dev; 7482 7483 QL_DPRINT2(ha, "enter\n"); 7484 7485 rc = qlnx_alloc_mem_arrays(ha); 7486 if (rc) 7487 goto qlnx_load_exit0; 7488 7489 qlnx_init_fp(ha); 7490 7491 rc = qlnx_alloc_mem_load(ha); 7492 if (rc) 7493 goto qlnx_load_exit1; 7494 7495 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7496 ha->num_rss, ha->num_tc); 7497 7498 for (i = 0; i < ha->num_rss; i++) { 7499 7500 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7501 (INTR_TYPE_NET | INTR_MPSAFE), 7502 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7503 &ha->irq_vec[i].handle))) { 7504 7505 QL_DPRINT1(ha, "could not setup interrupt\n"); 7506 goto qlnx_load_exit2; 7507 } 7508 7509 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 7510 irq %p handle %p\n", i, 7511 ha->irq_vec[i].irq_rid, 7512 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7513 7514 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7515 } 7516 7517 rc = qlnx_start_queues(ha); 7518 if (rc) 7519 goto qlnx_load_exit2; 7520 7521 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ 
succeeded\n"); 7522 7523 /* Add primary mac and set Rx filters */ 7524 rc = qlnx_set_rx_mode(ha); 7525 if (rc) 7526 goto qlnx_load_exit2; 7527 7528 /* Ask for link-up using current configuration */ 7529 qlnx_set_link(ha, true); 7530 7531 if (qlnx_vf_device(ha) == 0) 7532 qlnx_link_update(&ha->cdev.hwfns[0]); 7533 7534 ha->state = QLNX_STATE_OPEN; 7535 7536 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7537 7538 if (ha->flags.callout_init) 7539 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7540 7541 goto qlnx_load_exit0; 7542 7543 qlnx_load_exit2: 7544 qlnx_free_mem_load(ha); 7545 7546 qlnx_load_exit1: 7547 ha->num_rss = 0; 7548 7549 qlnx_load_exit0: 7550 QL_DPRINT2(ha, "exit [%d]\n", rc); 7551 return rc; 7552 } 7553 7554 static void 7555 qlnx_drain_soft_lro(qlnx_host_t *ha) 7556 { 7557 #ifdef QLNX_SOFT_LRO 7558 7559 struct ifnet *ifp; 7560 int i; 7561 7562 ifp = ha->ifp; 7563 7564 7565 if (ifp->if_capenable & IFCAP_LRO) { 7566 7567 for (i = 0; i < ha->num_rss; i++) { 7568 7569 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7570 struct lro_ctrl *lro; 7571 7572 lro = &fp->rxq->lro; 7573 7574 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 7575 7576 tcp_lro_flush_all(lro); 7577 7578 #else 7579 struct lro_entry *queued; 7580 7581 while ((!SLIST_EMPTY(&lro->lro_active))){ 7582 queued = SLIST_FIRST(&lro->lro_active); 7583 SLIST_REMOVE_HEAD(&lro->lro_active, next); 7584 tcp_lro_flush(lro, queued); 7585 } 7586 7587 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 7588 7589 } 7590 } 7591 7592 #endif /* #ifdef QLNX_SOFT_LRO */ 7593 7594 return; 7595 } 7596 7597 static void 7598 qlnx_unload(qlnx_host_t *ha) 7599 { 7600 struct ecore_dev *cdev; 7601 device_t dev; 7602 int i; 7603 7604 cdev = &ha->cdev; 7605 dev = ha->pci_dev; 7606 7607 QL_DPRINT2(ha, "enter\n"); 7608 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7609 7610 if (ha->state == QLNX_STATE_OPEN) { 7611 7612 qlnx_set_link(ha, false); 7613 qlnx_clean_filters(ha); 7614 qlnx_stop_queues(ha); 7615 ecore_hw_stop_fastpath(cdev); 7616 7617 for (i = 0; i < ha->num_rss; i++) { 7618 if (ha->irq_vec[i].handle) { 7619 (void)bus_teardown_intr(dev, 7620 ha->irq_vec[i].irq, 7621 ha->irq_vec[i].handle); 7622 ha->irq_vec[i].handle = NULL; 7623 } 7624 } 7625 7626 qlnx_drain_fp_taskqueues(ha); 7627 qlnx_drain_soft_lro(ha); 7628 qlnx_free_mem_load(ha); 7629 } 7630 7631 if (ha->flags.callout_init) 7632 callout_drain(&ha->qlnx_callout); 7633 7634 qlnx_mdelay(__func__, 1000); 7635 7636 ha->state = QLNX_STATE_CLOSED; 7637 7638 QL_DPRINT2(ha, "exit\n"); 7639 return; 7640 } 7641 7642 static int 7643 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7644 { 7645 int rval = -1; 7646 struct ecore_hwfn *p_hwfn; 7647 struct ecore_ptt *p_ptt; 7648 7649 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7650 7651 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7652 p_ptt = ecore_ptt_acquire(p_hwfn); 7653 7654 if (!p_ptt) { 7655 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7656 return (rval); 7657 } 7658 7659 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7660 7661 if (rval == DBG_STATUS_OK) 7662 rval = 0; 7663 else { 7664 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7665 "[0x%x]\n", rval); 7666 } 7667 7668 ecore_ptt_release(p_hwfn, p_ptt); 7669 7670 return (rval); 7671 } 7672 7673 static int 7674 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7675 { 7676 int rval = -1; 7677 struct ecore_hwfn *p_hwfn; 7678 struct ecore_ptt *p_ptt; 7679 7680 
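	/*
	 * Like qlnx_grc_dumpsize() above, this only queries the size of the
	 * dump: register the firmware function version with the debug
	 * library, acquire a PTT window on the requested hwfn, ask for the
	 * idle-check dump size in dwords, then release the PTT.
	 */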
ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7681 7682 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7683 p_ptt = ecore_ptt_acquire(p_hwfn); 7684 7685 if (!p_ptt) { 7686 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7687 return (rval); 7688 } 7689 7690 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7691 7692 if (rval == DBG_STATUS_OK) 7693 rval = 0; 7694 else { 7695 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7696 " [0x%x]\n", rval); 7697 } 7698 7699 ecore_ptt_release(p_hwfn, p_ptt); 7700 7701 return (rval); 7702 } 7703 7704 7705 static void 7706 qlnx_sample_storm_stats(qlnx_host_t *ha) 7707 { 7708 int i, index; 7709 struct ecore_dev *cdev; 7710 qlnx_storm_stats_t *s_stats; 7711 uint32_t reg; 7712 struct ecore_ptt *p_ptt; 7713 struct ecore_hwfn *hwfn; 7714 7715 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7716 ha->storm_stats_gather = 0; 7717 return; 7718 } 7719 7720 cdev = &ha->cdev; 7721 7722 for_each_hwfn(cdev, i) { 7723 7724 hwfn = &cdev->hwfns[i]; 7725 7726 p_ptt = ecore_ptt_acquire(hwfn); 7727 if (!p_ptt) 7728 return; 7729 7730 index = ha->storm_stats_index + 7731 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7732 7733 s_stats = &ha->storm_stats[index]; 7734 7735 /* XSTORM */ 7736 reg = XSEM_REG_FAST_MEMORY + 7737 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7738 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7739 7740 reg = XSEM_REG_FAST_MEMORY + 7741 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7742 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7743 7744 reg = XSEM_REG_FAST_MEMORY + 7745 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7746 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7747 7748 reg = XSEM_REG_FAST_MEMORY + 7749 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7750 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7751 7752 /* YSTORM */ 7753 reg = YSEM_REG_FAST_MEMORY + 7754 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7755 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7756 7757 reg = YSEM_REG_FAST_MEMORY + 7758 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7759 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7760 7761 reg = YSEM_REG_FAST_MEMORY + 7762 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7763 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7764 7765 reg = YSEM_REG_FAST_MEMORY + 7766 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7767 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7768 7769 /* PSTORM */ 7770 reg = PSEM_REG_FAST_MEMORY + 7771 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7772 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7773 7774 reg = PSEM_REG_FAST_MEMORY + 7775 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7776 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7777 7778 reg = PSEM_REG_FAST_MEMORY + 7779 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7780 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7781 7782 reg = PSEM_REG_FAST_MEMORY + 7783 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7784 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7785 7786 /* TSTORM */ 7787 reg = TSEM_REG_FAST_MEMORY + 7788 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7789 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7790 7791 reg = TSEM_REG_FAST_MEMORY + 7792 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7793 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7794 7795 reg = TSEM_REG_FAST_MEMORY + 7796 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7797 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
7798 7799 reg = TSEM_REG_FAST_MEMORY + 7800 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7801 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7802 7803 /* MSTORM */ 7804 reg = MSEM_REG_FAST_MEMORY + 7805 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7806 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7807 7808 reg = MSEM_REG_FAST_MEMORY + 7809 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7810 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7811 7812 reg = MSEM_REG_FAST_MEMORY + 7813 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7814 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7815 7816 reg = MSEM_REG_FAST_MEMORY + 7817 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7818 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7819 7820 /* USTORM */ 7821 reg = USEM_REG_FAST_MEMORY + 7822 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7823 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7824 7825 reg = USEM_REG_FAST_MEMORY + 7826 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7827 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7828 7829 reg = USEM_REG_FAST_MEMORY + 7830 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7831 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7832 7833 reg = USEM_REG_FAST_MEMORY + 7834 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7835 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7836 7837 ecore_ptt_release(hwfn, p_ptt); 7838 } 7839 7840 ha->storm_stats_index++; 7841 7842 return; 7843 } 7844 7845 /* 7846 * Name: qlnx_dump_buf8 7847 * Function: dumps a buffer as bytes 7848 */ 7849 static void 7850 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7851 { 7852 device_t dev; 7853 uint32_t i = 0; 7854 uint8_t *buf; 7855 7856 dev = ha->pci_dev; 7857 buf = dbuf; 7858 7859 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7860 7861 while (len >= 16) { 7862 device_printf(dev,"0x%08x:" 7863 " %02x %02x %02x %02x %02x %02x %02x %02x" 7864 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7865 buf[0], buf[1], buf[2], buf[3], 7866 buf[4], buf[5], buf[6], buf[7], 7867 buf[8], buf[9], buf[10], buf[11], 7868 buf[12], buf[13], buf[14], buf[15]); 7869 i += 16; 7870 len -= 16; 7871 buf += 16; 7872 } 7873 switch (len) { 7874 case 1: 7875 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7876 break; 7877 case 2: 7878 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7879 break; 7880 case 3: 7881 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7882 i, buf[0], buf[1], buf[2]); 7883 break; 7884 case 4: 7885 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7886 buf[0], buf[1], buf[2], buf[3]); 7887 break; 7888 case 5: 7889 device_printf(dev,"0x%08x:" 7890 " %02x %02x %02x %02x %02x\n", i, 7891 buf[0], buf[1], buf[2], buf[3], buf[4]); 7892 break; 7893 case 6: 7894 device_printf(dev,"0x%08x:" 7895 " %02x %02x %02x %02x %02x %02x\n", i, 7896 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7897 break; 7898 case 7: 7899 device_printf(dev,"0x%08x:" 7900 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7901 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7902 break; 7903 case 8: 7904 device_printf(dev,"0x%08x:" 7905 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7906 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7907 buf[7]); 7908 break; 7909 case 9: 7910 device_printf(dev,"0x%08x:" 7911 " %02x %02x %02x %02x %02x %02x %02x %02x" 7912 " %02x\n", i, 7913 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7914 buf[7], buf[8]); 7915 break; 7916 case 10: 7917 
device_printf(dev,"0x%08x:" 7918 " %02x %02x %02x %02x %02x %02x %02x %02x" 7919 " %02x %02x\n", i, 7920 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7921 buf[7], buf[8], buf[9]); 7922 break; 7923 case 11: 7924 device_printf(dev,"0x%08x:" 7925 " %02x %02x %02x %02x %02x %02x %02x %02x" 7926 " %02x %02x %02x\n", i, 7927 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7928 buf[7], buf[8], buf[9], buf[10]); 7929 break; 7930 case 12: 7931 device_printf(dev,"0x%08x:" 7932 " %02x %02x %02x %02x %02x %02x %02x %02x" 7933 " %02x %02x %02x %02x\n", i, 7934 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7935 buf[7], buf[8], buf[9], buf[10], buf[11]); 7936 break; 7937 case 13: 7938 device_printf(dev,"0x%08x:" 7939 " %02x %02x %02x %02x %02x %02x %02x %02x" 7940 " %02x %02x %02x %02x %02x\n", i, 7941 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7942 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7943 break; 7944 case 14: 7945 device_printf(dev,"0x%08x:" 7946 " %02x %02x %02x %02x %02x %02x %02x %02x" 7947 " %02x %02x %02x %02x %02x %02x\n", i, 7948 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7949 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7950 buf[13]); 7951 break; 7952 case 15: 7953 device_printf(dev,"0x%08x:" 7954 " %02x %02x %02x %02x %02x %02x %02x %02x" 7955 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7956 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7957 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7958 buf[13], buf[14]); 7959 break; 7960 default: 7961 break; 7962 } 7963 7964 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7965 7966 return; 7967 } 7968 7969 #ifdef CONFIG_ECORE_SRIOV 7970 7971 static void 7972 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7973 { 7974 struct ecore_public_vf_info *vf_info; 7975 7976 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7977 7978 if (!vf_info) 7979 return; 7980 7981 /* Clear the VF mac */ 7982 memset(vf_info->forced_mac, 0, ETH_ALEN); 7983 7984 vf_info->forced_vlan = 0; 7985 7986 return; 7987 } 7988 7989 void 7990 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7991 { 7992 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7993 return; 7994 } 7995 7996 static int 7997 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7998 struct ecore_filter_ucast *params) 7999 { 8000 struct ecore_public_vf_info *vf; 8001 8002 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 8003 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 8004 "VF[%d] vport not initialized\n", vfid); 8005 return ECORE_INVAL; 8006 } 8007 8008 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 8009 if (!vf) 8010 return -EINVAL; 8011 8012 /* No real decision to make; Store the configured MAC */ 8013 if (params->type == ECORE_FILTER_MAC || 8014 params->type == ECORE_FILTER_MAC_VLAN) 8015 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 8016 8017 return 0; 8018 } 8019 8020 int 8021 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 8022 { 8023 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 8024 } 8025 8026 static int 8027 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 8028 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 8029 { 8030 uint8_t mask; 8031 struct ecore_filter_accept_flags *flags; 8032 8033 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 8034 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 8035 "VF[%d] vport not initialized\n", vfid); 8036 return ECORE_INVAL; 8037 } 8038 8039 /* Untrusted VFs can't 
even be trusted to know that fact.
	 * Simply indicate everything is configured fine, and trace
	 * configuration 'behind their back'.
	 */
	mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;
	flags = &params->accept_flags;

	if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
		return 0;

	return 0;

}

int
qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
{
	return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
}

static int
qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
{
	int i;
	struct ecore_dev *cdev;

	cdev = p_hwfn->p_dev;

	for (i = 0; i < cdev->num_hwfns; i++) {
		if (&cdev->hwfns[i] == p_hwfn)
			break;
	}

	if (i >= cdev->num_hwfns)
		return (-1);

	return (i);
}

static int
__qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return (-1);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {

		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return (ECORE_SUCCESS);
}

int
qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
{
	return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
}

static void
__qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	if (!ha->sriov_initialized)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	if (ha->sriov_task[i].pf_taskqueue != NULL) {

		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}

	return;
}

void
qlnx_vf_flr_update(void *p_hwfn)
{
	__qlnx_vf_flr_update(p_hwfn);

	return;
}

#ifndef QLNX_VF

static void
qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
{
	qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
	int i;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
		ha, p_hwfn->p_dev, p_hwfn);

	if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
		return;

	QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
		ha, p_hwfn->p_dev, p_hwfn, i);

	if (ha->sriov_task[i].pf_taskqueue != NULL) {

		atomic_testandset_32(&ha->sriov_task[i].flags,
			QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);

		taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
			&ha->sriov_task[i].pf_task);
	}
}

static void
qlnx_initialize_sriov(qlnx_host_t *ha)
{
	device_t dev;
	nvlist_t *pf_schema, *vf_schema;
	int iov_error;

	dev = ha->pci_dev;

	pf_schema = pci_iov_schema_alloc_node();
	vf_schema = pci_iov_schema_alloc_node();

	pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
	pci_iov_schema_add_bool(vf_schema,
"allow-set-mac", 8186 IOV_SCHEMA_HASDEFAULT, FALSE); 8187 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 8188 IOV_SCHEMA_HASDEFAULT, FALSE); 8189 pci_iov_schema_add_uint16(vf_schema, "num-queues", 8190 IOV_SCHEMA_HASDEFAULT, 1); 8191 8192 iov_error = pci_iov_attach(dev, pf_schema, vf_schema); 8193 8194 if (iov_error != 0) { 8195 ha->sriov_initialized = 0; 8196 } else { 8197 device_printf(dev, "SRIOV initialized\n"); 8198 ha->sriov_initialized = 1; 8199 } 8200 8201 return; 8202 } 8203 8204 static void 8205 qlnx_sriov_disable(qlnx_host_t *ha) 8206 { 8207 struct ecore_dev *cdev; 8208 int i, j; 8209 8210 cdev = &ha->cdev; 8211 8212 ecore_iov_set_vfs_to_disable(cdev, true); 8213 8214 8215 for_each_hwfn(cdev, i) { 8216 8217 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 8218 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8219 8220 if (!ptt) { 8221 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8222 return; 8223 } 8224 /* Clean WFQ db and configure equal weight for all vports */ 8225 ecore_clean_wfq_db(hwfn, ptt); 8226 8227 ecore_for_each_vf(hwfn, j) { 8228 int k = 0; 8229 8230 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 8231 continue; 8232 8233 if (ecore_iov_is_vf_started(hwfn, j)) { 8234 /* Wait until VF is disabled before releasing */ 8235 8236 for (k = 0; k < 100; k++) { 8237 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 8238 qlnx_mdelay(__func__, 10); 8239 } else 8240 break; 8241 } 8242 } 8243 8244 if (k < 100) 8245 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 8246 ptt, j); 8247 else { 8248 QL_DPRINT1(ha, 8249 "Timeout waiting for VF's FLR to end\n"); 8250 } 8251 } 8252 ecore_ptt_release(hwfn, ptt); 8253 } 8254 8255 ecore_iov_set_vfs_to_disable(cdev, false); 8256 8257 return; 8258 } 8259 8260 8261 static void 8262 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 8263 struct ecore_iov_vf_init_params *params) 8264 { 8265 u16 base, i; 8266 8267 /* Since we have an equal resource distribution per-VF, and we assume 8268 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 8269 * sequentially from there. 
	 */
	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;

	params->rel_vf_id = vfid;

	for (i = 0; i < params->num_queues; i++) {
		params->req_rx_queue[i] = base + i;
		params->req_tx_queue[i] = base + i;
	}

	/* PF uses indices 0 for itself; Set vport/RSS afterwards */
	params->vport_id = vfid + 1;
	params->rss_eng_id = vfid + 1;

	return;
}

static int
qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
{
	qlnx_host_t *ha;
	struct ecore_dev *cdev;
	struct ecore_iov_vf_init_params params;
	int ret, j, i;
	uint32_t max_vfs;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (-1);
	}

	if (qlnx_create_pf_taskqueues(ha) != 0)
		goto qlnx_iov_init_err0;

	cdev = &ha->cdev;

	max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);

	QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
		dev, num_vfs, max_vfs);

	if (num_vfs >= max_vfs) {
		QL_DPRINT1(ha, "Can start at most %d VFs\n",
			(RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
		goto qlnx_iov_init_err0;
	}

	ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
				M_NOWAIT);

	if (ha->vf_attr == NULL)
		goto qlnx_iov_init_err0;

	memset(&params, 0, sizeof(params));

	/* Initialize HW for VF access */
	for_each_hwfn(cdev, j) {
		struct ecore_hwfn *hwfn = &cdev->hwfns[j];
		struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);

		/* Make sure not to use more than 16 queues per VF */
		params.num_queues = min_t(int,
				(FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
				16);

		if (!ptt) {
			QL_DPRINT1(ha, "Failed to acquire ptt\n");
			goto qlnx_iov_init_err1;
		}

		for (i = 0; i < num_vfs; i++) {

			if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
				continue;

			qlnx_sriov_enable_qid_config(hwfn, i, &params);

			ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);

			if (ret) {
				QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
				ecore_ptt_release(hwfn, ptt);
				goto qlnx_iov_init_err1;
			}
		}

		ecore_ptt_release(hwfn, ptt);
	}

	ha->num_vfs = num_vfs;
	qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);

	QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);

	return (0);

qlnx_iov_init_err1:
	qlnx_sriov_disable(ha);

qlnx_iov_init_err0:
	qlnx_destroy_pf_taskqueues(ha);
	ha->num_vfs = 0;

	return (-1);
}

static void
qlnx_iov_uninit(device_t dev)
{
	qlnx_host_t *ha;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return;
	}

	QL_DPRINT2(ha," dev = %p enter\n", dev);

	qlnx_sriov_disable(ha);
	qlnx_destroy_pf_taskqueues(ha);

	free(ha->vf_attr, M_QLNXBUF);
	ha->vf_attr = NULL;

	ha->num_vfs = 0;

	QL_DPRINT2(ha," dev = %p exit\n", dev);
	return;
}

static int
qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
{
	qlnx_host_t *ha;
	qlnx_vf_attr_t *vf_attr;
	unsigned const char *mac;
	size_t size;
	struct ecore_hwfn *p_hwfn;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "%s: cannot get softc\n", __func__);
8412 return (-1); 8413 } 8414 8415 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8416 8417 if (vfnum > (ha->num_vfs - 1)) { 8418 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8419 vfnum, (ha->num_vfs - 1)); 8420 } 8421 8422 vf_attr = &ha->vf_attr[vfnum]; 8423 8424 if (nvlist_exists_binary(params, "mac-addr")) { 8425 mac = nvlist_get_binary(params, "mac-addr", &size); 8426 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8427 device_printf(dev, 8428 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8429 __func__, vf_attr->mac_addr[0], 8430 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8431 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8432 vf_attr->mac_addr[5]); 8433 p_hwfn = &ha->cdev.hwfns[0]; 8434 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8435 vfnum); 8436 } 8437 8438 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8439 return (0); 8440 } 8441 8442 static void 8443 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8444 { 8445 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8446 struct ecore_ptt *ptt; 8447 int i; 8448 8449 ptt = ecore_ptt_acquire(p_hwfn); 8450 if (!ptt) { 8451 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8452 __qlnx_pf_vf_msg(p_hwfn, 0); 8453 return; 8454 } 8455 8456 ecore_iov_pf_get_pending_events(p_hwfn, events); 8457 8458 QL_DPRINT2(ha, "Event mask of VF events:" 8459 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8460 events[0], events[1], events[2]); 8461 8462 ecore_for_each_vf(p_hwfn, i) { 8463 8464 /* Skip VFs with no pending messages */ 8465 if (!(events[i / 64] & (1ULL << (i % 64)))) 8466 continue; 8467 8468 QL_DPRINT2(ha, 8469 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8470 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8471 8472 /* Copy VF's message to PF's request buffer for that VF */ 8473 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8474 continue; 8475 8476 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8477 } 8478 8479 ecore_ptt_release(p_hwfn, ptt); 8480 8481 return; 8482 } 8483 8484 static void 8485 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8486 { 8487 struct ecore_ptt *ptt; 8488 int ret; 8489 8490 ptt = ecore_ptt_acquire(p_hwfn); 8491 8492 if (!ptt) { 8493 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8494 __qlnx_vf_flr_update(p_hwfn); 8495 return; 8496 } 8497 8498 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8499 8500 if (ret) { 8501 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8502 } 8503 8504 ecore_ptt_release(p_hwfn, ptt); 8505 8506 return; 8507 } 8508 8509 static void 8510 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8511 { 8512 struct ecore_ptt *ptt; 8513 int i; 8514 8515 ptt = ecore_ptt_acquire(p_hwfn); 8516 8517 if (!ptt) { 8518 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8519 qlnx_vf_bulleting_update(p_hwfn); 8520 return; 8521 } 8522 8523 ecore_for_each_vf(p_hwfn, i) { 8524 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8525 p_hwfn, i); 8526 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8527 } 8528 8529 ecore_ptt_release(p_hwfn, ptt); 8530 8531 return; 8532 } 8533 8534 static void 8535 qlnx_pf_taskqueue(void *context, int pending) 8536 { 8537 struct ecore_hwfn *p_hwfn; 8538 qlnx_host_t *ha; 8539 int i; 8540 8541 p_hwfn = context; 8542 8543 if (p_hwfn == NULL) 8544 return; 8545 8546 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8547 8548 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8549 return; 8550 8551 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8552 
		QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
		qlnx_handle_vf_msg(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
		qlnx_handle_vf_flr_update(ha, p_hwfn);

	if (atomic_testandclear_32(&ha->sriov_task[i].flags,
		QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
		qlnx_handle_bulletin_update(ha, p_hwfn);

	return;
}

static int
qlnx_create_pf_taskqueues(qlnx_host_t *ha)
{
	int i;
	uint8_t tq_name[32];

	for (i = 0; i < ha->cdev.num_hwfns; i++) {

		struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

		bzero(tq_name, sizeof (tq_name));
		snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);

		TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);

		ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
						taskqueue_thread_enqueue,
						&ha->sriov_task[i].pf_taskqueue);

		if (ha->sriov_task[i].pf_taskqueue == NULL)
			return (-1);

		taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
			PI_NET, "%s", tq_name);

		QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
	}

	return (0);
}

static void
qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
{
	int i;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sriov_task[i].pf_taskqueue != NULL) {
			taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
				&ha->sriov_task[i].pf_task);
			taskqueue_free(ha->sriov_task[i].pf_taskqueue);
			ha->sriov_task[i].pf_taskqueue = NULL;
		}
	}
	return;
}

static void
qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	int i;

	if (!p_hwfn->pf_iov_info)
		return;

	memset(&params, 0, sizeof(struct ecore_mcp_link_params));
	memset(&link, 0, sizeof(struct ecore_mcp_link_state));
	memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));

	memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
	memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));

	QL_DPRINT2(ha, "called\n");

	/* Update bulletin of all future possible VFs with link configuration */
	for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {

		/* Modify link according to the VF's configured link state */

		link.link_up = false;

		if (ha->link_up) {
			link.link_up = true;
			/* Set speed according to maximum supported by HW.
			 * that is 40G for regular devices and 100G for CMT
			 * mode devices.
			 */
			link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
					100000 : link.speed;
		}
		QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
		ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
	}

	qlnx_vf_bulleting_update(p_hwfn);

	return;
}
#endif /* #ifndef QLNX_VF */
#endif /* #ifdef CONFIG_ECORE_SRIOV */
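
/*
 * Usage sketch (illustrative only; the device name below is a placeholder
 * and the values are assumptions): the VF schema registered in
 * qlnx_initialize_sriov() exposes the "mac-addr", "allow-set-mac",
 * "allow-promisc" and "num-queues" keys to iovctl(8), so a minimal
 * /etc/iovctl.conf for a PF could look roughly like:
 *
 *	PF {
 *		device : "qlnxe0";
 *		num_vfs : 4;
 *	}
 *
 *	DEFAULT {
 *		allow-set-mac : true;
 *		num-queues : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "00:0e:1e:00:00:01";
 *	}
 *
 * Creating the VFs with "iovctl -C -f /etc/iovctl.conf" reaches
 * qlnx_iov_init() and qlnx_iov_add_vf() above through the kernel PCI
 * SR-IOV framework.
 */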