/*
 * Copyright (c) 2017-2018 Cavium, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * File: qlnx_os.c
 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "qlnx_os.h"
#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore.h"
#include "ecore_chain.h"
#include "ecore_status.h"
#include "ecore_hw.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_int.h"
#include "ecore_cxt.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
#include "ecore_dev_api.h"
#include "ecore_l2_api.h"
#include "ecore_mcp.h"
#include "ecore_hw_defs.h"
#include "mcp_public.h"
#include "ecore_iro.h"
#include "nvm_cfg.h"
#include "ecore_dbg_fw_funcs.h"
#include "ecore_iov_api.h"
#include "ecore_vf_api.h"

#include "qlnx_ioctl.h"
#include "qlnx_def.h"
#include "qlnx_ver.h"

#ifdef QLNX_ENABLE_IWARP
#include "qlnx_rdma.h"
#endif /* #ifdef QLNX_ENABLE_IWARP */

#include <sys/smp.h>

/*
 * static functions
 */
/*
 * ioctl related functions
 */
static void qlnx_add_sysctls(qlnx_host_t *ha);

/*
 * main driver
 */
static void qlnx_release(qlnx_host_t *ha);
static void qlnx_fp_isr(void *arg);
static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
static int qlnx_set_promisc(qlnx_host_t *ha);
static int qlnx_set_allmulti(qlnx_host_t *ha);
static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(struct ifnet *ifp);
static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr);
static void qlnx_stop(qlnx_host_t *ha);
static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct mbuf **m_headp);
static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha);
static uint32_t qlnx_get_optics(qlnx_host_t *ha,
    struct qlnx_link_output *if_link);
static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp);
static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp,
    struct mbuf *mp);
static void qlnx_qflush(struct ifnet *ifp);

static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha);
static void qlnx_free_parent_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_tx_dma_tag(qlnx_host_t *ha);
static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha);
static void qlnx_free_rx_dma_tag(qlnx_host_t *ha);

static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver);
static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size);

static int qlnx_nic_setup(struct ecore_dev *cdev,
    struct ecore_pf_params *func_params);
static int qlnx_nic_start(struct ecore_dev *cdev);
static int qlnx_slowpath_start(qlnx_host_t *ha);
static int qlnx_slowpath_stop(qlnx_host_t *ha);
static int qlnx_init_hw(qlnx_host_t *ha);
static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE],
    char ver_str[VER_SIZE]);
static void qlnx_unload(qlnx_host_t *ha);
static int qlnx_load(qlnx_host_t *ha);
static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt,
    uint32_t add_mac);
static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf,
    uint32_t len);
static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq);
static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq);
static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn,
    struct qlnx_rx_queue *rxq);
static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter);
static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords,
    int hwfn_index);
static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords,
    int hwfn_index);
static void qlnx_timer(void *arg);
static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp);
static void qlnx_trigger_dump(qlnx_host_t *ha);
static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq);
static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq);
static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget,
    int lro_enable);
static void qlnx_fp_taskqueue(void *context, int pending);
static void qlnx_sample_storm_stats(qlnx_host_t *ha);
static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size,
    struct qlnx_agg_info *tpa);
static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa);

#if __FreeBSD_version >= 1100000
static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt);
#endif

/*
 * Hooks to the Operating Systems
 */
static int qlnx_pci_probe (device_t);
static int qlnx_pci_attach (device_t);
static int qlnx_pci_detach (device_t);

#ifndef QLNX_VF

#ifdef CONFIG_ECORE_SRIOV

static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
static void qlnx_iov_uninit(device_t dev);
static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
static void qlnx_initialize_sriov(qlnx_host_t *ha);
static void qlnx_pf_taskqueue(void *context, int pending);
static int qlnx_create_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */

static device_method_t qlnx_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
    DEVMETHOD(pci_iov_init, qlnx_iov_init),
    DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
    DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
    { 0, 0 }
};

static driver_t qlnx_pci_driver = {
    "ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnx_devclass;

MODULE_VERSION(if_qlnxe,1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

#else

static device_method_t qlnxv_pci_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, qlnx_pci_probe),
    DEVMETHOD(device_attach, qlnx_pci_attach),
    DEVMETHOD(device_detach, qlnx_pci_detach),
    { 0, 0 }
};

static driver_t qlnxv_pci_driver = {
    "ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

static devclass_t qlnxv_devclass;
MODULE_VERSION(if_qlnxev,1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC              0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634      0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644      0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656      0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654      0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070      0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090      0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

#if __FreeBSD_version < 1100000

TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count);

#endif

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");

/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured in sysctl.
 *
 */
#define QLNX_PERSONALITY_DEFAULT       0x0  /* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY      0x1  /* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP     0x2  /* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE      0x3  /* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC 4
#define QLNX_PERSONALIY_MASK           0xF

/* RDMA configuration; 64bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

#if __FreeBSD_version < 1100000

TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration);

SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");

#else

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");

#endif /* #if __FreeBSD_version < 1100000 */

int
qlnx_vf_device(qlnx_host_t *ha)
{
    uint16_t device_id;

    device_id = ha->device_id;

    if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
        return 0;

    return -1;
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
    uint16_t device_id;

    device_id = ha->device_id;

#ifndef QLNX_VF
    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_8070))
        return 0;
#else
    if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
        return 0;

#endif /* #ifndef QLNX_VF */
    return -1;
}

#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
    uint16_t device_id;

    device_id = pci_get_device(ha->pci_dev);

    if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
        (device_id == QLOGIC_PCI_DEVICE_ID_8070))
        return (0);

    return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */

/*
 * Name: qlnx_pci_probe
 * Function: Validate the PCI device to be a QLA80XX device
 */
static int
qlnx_pci_probe(device_t dev)
{
    snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
        QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
    snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

    if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
        return (ENXIO);
    }

    switch (pci_get_device(dev)) {
#ifndef QLNX_VF

    case QLOGIC_PCI_DEVICE_ID_1644:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);

        break;

    case QLOGIC_PCI_DEVICE_ID_1634:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);

        break;

    case QLOGIC_PCI_DEVICE_ID_1656:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);

        break;

    case QLOGIC_PCI_DEVICE_ID_1654:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);

        break;

    case QLOGIC_PCI_DEVICE_ID_8070:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
            " Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);

        break;

#else
    case QLOGIC_PCI_DEVICE_ID_8090:
        snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
            "Qlogic SRIOV PCI CNA (AH) "
            "Adapter-Ethernet Function",
            QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
            QLNX_VERSION_BUILD);
        device_set_desc_copy(dev, qlnx_dev_str);

        break;

#endif /* #ifndef QLNX_VF */

    default:
        return (ENXIO);
    }

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_init();
#endif /* #ifdef QLNX_ENABLE_IWARP */

    return (BUS_PROBE_DEFAULT);
}

static uint16_t
qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp,
    struct qlnx_tx_queue *txq)
{
    u16 hw_bd_cons;
    u16 ecore_cons_idx;
    uint16_t diff;

    hw_bd_cons = le16toh(*txq->hw_cons_ptr);

    ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl);
    if (hw_bd_cons < ecore_cons_idx) {
        /* the 16-bit hardware consumer index has wrapped past ours */
        diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons);
    } else {
        diff = hw_bd_cons - ecore_cons_idx;
    }
    return diff;
}

static void
qlnx_sp_intr(void *arg)
{
    struct ecore_hwfn *p_hwfn;
    qlnx_host_t *ha;
    int i;

    p_hwfn = arg;

    if (p_hwfn == NULL) {
        printf("%s: spurious slowpath intr\n", __func__);
        return;
    }

    ha = (qlnx_host_t *)p_hwfn->p_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (&ha->cdev.hwfns[i] == p_hwfn) {
            taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]);
            break;
        }
    }
    QL_DPRINT2(ha, "exit\n");

    return;
}

static void
qlnx_sp_taskqueue(void *context, int pending)
{
    struct ecore_hwfn *p_hwfn;

    p_hwfn = context;

    if (p_hwfn != NULL) {
        qlnx_sp_isr(p_hwfn);
    }
    return;
}

static int
qlnx_create_sp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i);

        TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn);

        ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue, &ha->sp_taskqueue[i]);

        if (ha->sp_taskqueue[i] == NULL)
            return (-1);

        taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]);
    }

    return (0);
}

static void
qlnx_destroy_sp_taskqueues(qlnx_host_t *ha)
{
    int i;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_taskqueue[i] != NULL) {
            taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]);
            taskqueue_free(ha->sp_taskqueue[i]);
        }
    }
    return;
}

static void
qlnx_fp_taskqueue(void *context, int pending)
{
    struct qlnx_fastpath *fp;
    qlnx_host_t *ha;
    struct ifnet *ifp;

    fp = context;

    if (fp == NULL)
        return;

    ha = (qlnx_host_t *)fp->edev;

    ifp = ha->ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        if (!drbr_empty(ifp, fp->tx_br)) {
            if (mtx_trylock(&fp->tx_mtx)) {
#ifdef QLNX_TRACE_PERF_DATA
                tx_pkts = fp->tx_pkts_transmitted;
                tx_compl = fp->tx_pkts_completed;
#endif

                qlnx_transmit_locked(ifp, fp, NULL);

#ifdef QLNX_TRACE_PERF_DATA
                fp->tx_pkts_trans_fp +=
                    (fp->tx_pkts_transmitted - tx_pkts);
                fp->tx_pkts_compl_fp +=
                    (fp->tx_pkts_completed - tx_compl);
#endif
                mtx_unlock(&fp->tx_mtx);
            }
        }
    }

    QL_DPRINT2(ha, "exit \n");
    return;
}

static int
qlnx_create_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    uint8_t tq_name[32];
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        bzero(tq_name, sizeof (tq_name));
        snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i);

        TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp);

        fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
            taskqueue_thread_enqueue,
            &fp->fp_taskqueue);

        if (fp->fp_taskqueue == NULL)
            return (-1);

        taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s",
            tq_name);

        QL_DPRINT1(ha, "%p\n", fp->fp_taskqueue);
    }

    return (0);
}

static void
qlnx_destroy_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            taskqueue_free(fp->fp_taskqueue);
            fp->fp_taskqueue = NULL;
        }
    }
    return;
}

static void
qlnx_drain_fp_taskqueues(qlnx_host_t *ha)
{
    int i;
    struct qlnx_fastpath *fp;

    for (i = 0; i < ha->num_rss; i++) {
        fp = &ha->fp_array[i];

        if (fp->fp_taskqueue != NULL) {
            QLNX_UNLOCK(ha);
            taskqueue_drain(fp->fp_taskqueue, &fp->fp_task);
            QLNX_LOCK(ha);
        }
    }
    return;
}

static void
qlnx_get_params(qlnx_host_t *ha)
{
    if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) {
        device_printf(ha->pci_dev, "invalid queue_count value (%d)\n",
            qlnxe_queue_count);
        qlnxe_queue_count = 0;
    }
    return;
}

static void
qlnx_error_recovery_taskqueue(void *context, int pending)
{
    qlnx_host_t *ha;

    ha = context;

    QL_DPRINT2(ha, "enter\n");

    QLNX_LOCK(ha);
    qlnx_stop(ha);
    QLNX_UNLOCK(ha);

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_dev_remove(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

    qlnx_slowpath_stop(ha);
    qlnx_slowpath_start(ha);

#ifdef QLNX_ENABLE_IWARP
    qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */

    qlnx_init(ha);

    callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);

    QL_DPRINT2(ha, "exit\n");

    return;
}

static int
qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha)
{
    uint8_t tq_name[32];

    bzero(tq_name, sizeof (tq_name));
    snprintf(tq_name, sizeof (tq_name), "ql_err_tq");

    TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha);

    ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
        taskqueue_thread_enqueue, &ha->err_taskqueue);
    if (ha->err_taskqueue == NULL)
        return (-1);

    taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name);

    QL_DPRINT1(ha, "%p\n", ha->err_taskqueue);

    return (0);
}

static void
qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha)
{
    if (ha->err_taskqueue != NULL) {
        taskqueue_drain(ha->err_taskqueue, &ha->err_task);
        taskqueue_free(ha->err_taskqueue);
    }

    ha->err_taskqueue = NULL;

    return;
}

/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
    qlnx_host_t *ha = NULL;
    uint32_t rsrc_len_reg = 0;
    uint32_t rsrc_len_dbells = 0;
    uint32_t rsrc_len_msix = 0;
    int i;
    uint32_t mfw_ver;
    uint32_t num_sp_msix = 0;
    uint32_t num_rdma_irqs = 0;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "cannot get softc\n");
        return (ENOMEM);
    }

    memset(ha, 0, sizeof (qlnx_host_t));

    ha->device_id = pci_get_device(dev);

    if (qlnx_valid_device(ha) != 0) {
        device_printf(dev, "device is not a valid device\n");
        return (ENXIO);
    }
    ha->pci_func = pci_get_function(dev);

    ha->pci_dev = dev;

    mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

    ha->flags.lock_init = 1;

    pci_enable_busmaster(dev);

    /*
     * map the PCI BARs
     */

    ha->reg_rid = PCIR_BAR(0);
    ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
        RF_ACTIVE);

    if (ha->pci_reg == NULL) {
        device_printf(dev, "unable to map BAR0\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->reg_rid);

    ha->dbells_rid = PCIR_BAR(2);
    rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
        SYS_RES_MEMORY, ha->dbells_rid);
    if (rsrc_len_dbells) {
        ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
            &ha->dbells_rid, RF_ACTIVE);

        if (ha->pci_dbells == NULL) {
            device_printf(dev, "unable to map BAR1\n");
            goto qlnx_pci_attach_err;
        }
        ha->dbells_phys_addr = (uint64_t)
            bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

        ha->dbells_size = rsrc_len_dbells;
    } else {
        if (qlnx_vf_device(ha) != 0) {
            device_printf(dev, " BAR1 size is zero\n");
            goto qlnx_pci_attach_err;
        }
    }

    ha->msix_rid = PCIR_BAR(4);
    ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
        &ha->msix_rid, RF_ACTIVE);

    if (ha->msix_bar == NULL) {
        device_printf(dev, "unable to map BAR2\n");
        goto qlnx_pci_attach_err;
    }

    rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
        ha->msix_rid);

    ha->dbg_level = 0x0000;

    QL_DPRINT1(ha, "\n\t\t\t"
        "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
        "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
        "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
        " msix_avail = 0x%x "
        "\n\t\t\t[ncpus = %d]\n",
        ha->pci_dev, ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        mp_ncpus);

    /*
     * allocate dma tags
     */

    if (qlnx_alloc_parent_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_tx_dma_tag(ha))
        goto qlnx_pci_attach_err;

    if (qlnx_alloc_rx_dma_tag(ha))
        goto qlnx_pci_attach_err;
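
    /*
     * At this point BAR0 (device registers), BAR2 (doorbells, which may be
     * absent on a VF) and BAR4 (MSI-X table) are mapped, and the parent,
     * Tx and Rx busdma tags have been created; this is everything
     * qlnx_init_hw() and the ecore layer need below.
     */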

    if (qlnx_init_hw(ha) != 0)
        goto qlnx_pci_attach_err;

    ha->flags.hw_init = 1;

    qlnx_get_params(ha);

    if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
        (qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
        qlnxe_queue_count = QLNX_MAX_RSS;
    }

    /*
     * Allocate MSI-x vectors
     */
    if (qlnx_vf_device(ha) != 0) {
        if (qlnxe_queue_count == 0)
            ha->num_rss = QLNX_DEFAULT_RSS;
        else
            ha->num_rss = qlnxe_queue_count;

        num_sp_msix = ha->cdev.num_hwfns;
    } else {
        uint8_t max_rxq;
        uint8_t max_txq;

        ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq);
        ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq);

        if (max_rxq < max_txq)
            ha->num_rss = max_rxq;
        else
            ha->num_rss = max_txq;

        if (ha->num_rss > QLNX_MAX_VF_RSS)
            ha->num_rss = QLNX_MAX_VF_RSS;

        num_sp_msix = 0;
    }

    if (ha->num_rss > mp_ncpus)
        ha->num_rss = mp_ncpus;

    ha->num_tc = QLNX_MAX_TC;

    ha->msix_count = pci_msix_count(dev);

#ifdef QLNX_ENABLE_IWARP

    num_rdma_irqs = qlnx_rdma_get_num_irqs(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

    if (!ha->msix_count ||
        (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) {
        device_printf(dev, "%s: msix_count[%d] not enough\n", __func__,
            ha->msix_count);
        goto qlnx_pci_attach_err;
    }

    if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs))
        ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs;
    else
        ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs);

    QL_DPRINT1(ha, "\n\t\t\t"
        "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
        "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
        "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
        " msix_avail = 0x%x msix_alloc = 0x%x"
        "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n",
        ha->pci_reg, rsrc_len_reg,
        ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
        ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
        ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc);

    if (pci_alloc_msix(dev, &ha->msix_count)) {
        device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__,
            ha->msix_count);
        ha->msix_count = 0;
        goto qlnx_pci_attach_err;
    }

    /*
     * Initialize slow path interrupt and task queue
     */

    if (num_sp_msix) {
        if (qlnx_create_sp_taskqueues(ha) != 0)
            goto qlnx_pci_attach_err;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
            struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];

            ha->sp_irq_rid[i] = i + 1;
            ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
                &ha->sp_irq_rid[i],
                (RF_ACTIVE | RF_SHAREABLE));
            if (ha->sp_irq[i] == NULL) {
                device_printf(dev,
                    "could not allocate mbx interrupt\n");
                goto qlnx_pci_attach_err;
            }

            if (bus_setup_intr(dev, ha->sp_irq[i],
                (INTR_TYPE_NET | INTR_MPSAFE), NULL,
                qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) {
                device_printf(dev,
                    "could not setup slow path interrupt\n");
                goto qlnx_pci_attach_err;
            }

            QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d"
                " sp_irq %p sp_handle %p\n", p_hwfn,
                ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]);
        }
    }

    /*
     * initialize fast path interrupt
     */
    if (qlnx_create_fp_taskqueues(ha) != 0)
        goto qlnx_pci_attach_err;

    for (i = 0; i < ha->num_rss; i++) {
        ha->irq_vec[i].rss_idx = i;
        ha->irq_vec[i].ha = ha;
        ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i;

        ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
            &ha->irq_vec[i].irq_rid,
            (RF_ACTIVE | RF_SHAREABLE));

        if (ha->irq_vec[i].irq == NULL) {
            device_printf(dev,
                "could not allocate interrupt[%d] irq_rid = %d\n",
                i, ha->irq_vec[i].irq_rid);
            goto qlnx_pci_attach_err;
        }

        if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) {
            device_printf(dev, "could not allocate tx_br[%d]\n", i);
            goto qlnx_pci_attach_err;
        }
    }

    if (qlnx_vf_device(ha) != 0) {
        callout_init(&ha->qlnx_callout, 1);
        ha->flags.callout_init = 1;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
            if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0)
                goto qlnx_pci_attach_err;
            if (ha->grcdump_size[i] == 0)
                goto qlnx_pci_attach_err;

            ha->grcdump_size[i] = ha->grcdump_size[i] << 2;
            QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n",
                i, ha->grcdump_size[i]);

            ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]);
            if (ha->grcdump[i] == NULL) {
                device_printf(dev, "grcdump alloc[%d] failed\n", i);
                goto qlnx_pci_attach_err;
            }

            if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0)
                goto qlnx_pci_attach_err;
            if (ha->idle_chk_size[i] == 0)
                goto qlnx_pci_attach_err;

            ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2;
            QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n",
                i, ha->idle_chk_size[i]);

            ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]);

            if (ha->idle_chk[i] == NULL) {
                device_printf(dev, "idle_chk alloc failed\n");
                goto qlnx_pci_attach_err;
            }
        }

        if (qlnx_create_error_recovery_taskqueue(ha) != 0)
            goto qlnx_pci_attach_err;
    }

    if (qlnx_slowpath_start(ha) != 0)
        goto qlnx_pci_attach_err;
    else
        ha->flags.slowpath_start = 1;

    if (qlnx_vf_device(ha) != 0) {
        if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) {
            qlnx_mdelay(__func__, 1000);
            qlnx_trigger_dump(ha);

            goto qlnx_pci_attach_err0;
        }

        if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) {
            qlnx_mdelay(__func__, 1000);
            qlnx_trigger_dump(ha);

            goto qlnx_pci_attach_err0;
        }
    } else {
        struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0];
        ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL);
    }

    snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d",
        ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF),
        ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF));
    snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d",
        FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
        FW_ENGINEERING_VERSION);

    QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n",
        ha->stormfw_ver, ha->mfw_ver);

    qlnx_init_ifnet(dev, ha);

    /*
     * add sysctls
     */
    qlnx_add_sysctls(ha);

qlnx_pci_attach_err0:
    /*
     * create ioctl device interface
     */
    if (qlnx_vf_device(ha) != 0) {
        if (qlnx_make_cdev(ha)) {
            device_printf(dev, "%s: ql_make_cdev failed\n", __func__);
            goto qlnx_pci_attach_err;
        }

#ifdef QLNX_ENABLE_IWARP
        qlnx_rdma_dev_add(ha);
#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

#ifndef QLNX_VF
#ifdef CONFIG_ECORE_SRIOV

    if (qlnx_vf_device(ha) != 0)
        qlnx_initialize_sriov(ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */
#endif /* #ifdef QLNX_VF */

    QL_DPRINT2(ha, "success\n");

    return (0);

qlnx_pci_attach_err:

    qlnx_release(ha);

    return (ENXIO);
}

/*
 * Name: qlnx_pci_detach
 * Function: Unhooks the device from the operating system
 */
static int
qlnx_pci_detach(device_t dev)
{
    qlnx_host_t *ha = NULL;

    if ((ha = device_get_softc(dev)) == NULL) {
        device_printf(dev, "%s: cannot get softc\n", __func__);
        return (ENOMEM);
    }

    if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
        int ret;

        ret = pci_iov_detach(dev);
        if (ret) {
            device_printf(dev, "%s: SRIOV in use\n", __func__);
            return (ret);
        }

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
        if (qlnx_rdma_dev_remove(ha) != 0)
            return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
    }

    QLNX_LOCK(ha);
    qlnx_stop(ha);
    QLNX_UNLOCK(ha);

    qlnx_release(ha);

    return (0);
}

#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
    uint8_t personality;

    personality = (qlnxe_rdma_configuration >>
        (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
        QLNX_PERSONALIY_MASK;
    return (personality);
}

static void
qlnx_set_personality(qlnx_host_t *ha)
{
    struct ecore_hwfn *p_hwfn;
    uint8_t personality;

    p_hwfn = &ha->cdev.hwfns[0];

    personality = qlnx_get_personality(ha->pci_func);

    switch (personality) {
    case QLNX_PERSONALITY_DEFAULT:
        device_printf(ha->pci_dev, "%s: DEFAULT\n",
            __func__);
        ha->personality = ECORE_PCI_DEFAULT;
        break;

    case QLNX_PERSONALITY_ETH_ONLY:
        device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
            __func__);
        ha->personality = ECORE_PCI_ETH;
        break;

    case QLNX_PERSONALITY_ETH_IWARP:
        device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
            __func__);
        ha->personality = ECORE_PCI_ETH_IWARP;
        break;

    case QLNX_PERSONALITY_ETH_ROCE:
        device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
            __func__);
        ha->personality = ECORE_PCI_ETH_ROCE;
        break;
    }

    return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */

static int
qlnx_init_hw(qlnx_host_t *ha)
{
    int rval = 0;
    struct ecore_hw_prepare_params params;

    ecore_init_struct(&ha->cdev);

    /* ha->dp_module = ECORE_MSG_PROBE |
        ECORE_MSG_INTR |
        ECORE_MSG_SP |
        ECORE_MSG_LINK |
        ECORE_MSG_SPQ |
        ECORE_MSG_RDMA;
    ha->dp_level = ECORE_LEVEL_VERBOSE;*/
    //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
    ha->dp_level = ECORE_LEVEL_NOTICE;
    //ha->dp_level = ECORE_LEVEL_VERBOSE;

    ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

    ha->cdev.regview = ha->pci_reg;

    ha->personality = ECORE_PCI_DEFAULT;

    if (qlnx_vf_device(ha) == 0) {
        ha->cdev.b_is_vf = true;

        if (ha->pci_dbells != NULL) {
            ha->cdev.doorbells = ha->pci_dbells;
            ha->cdev.db_phys_addr = ha->dbells_phys_addr;
            ha->cdev.db_size = ha->dbells_size;
        } else {
            ha->pci_dbells = ha->pci_reg;
        }
    } else {
        ha->cdev.doorbells = ha->pci_dbells;
        ha->cdev.db_phys_addr = ha->dbells_phys_addr;
        ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP
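
        /*
         * For RDMA-capable PF devices, the personality chosen via the
         * hw.qlnxe.rdma_configuration tunable (see qlnx_set_personality()
         * above) overrides the ECORE_PCI_DEFAULT set earlier; it is passed
         * to ecore_hw_prepare() through params.personality below.
         */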
        if (qlnx_rdma_supported(ha) == 0)
            qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */

    }

    QL_DPRINT2(ha, "%s: %s\n", __func__,
        (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));

    bzero(&params, sizeof (struct ecore_hw_prepare_params));

    params.personality = ha->personality;

    params.drv_resc_alloc = false;
    params.chk_reg_fifo = false;
    params.initiate_pf_flr = true;
    params.epoch = 0;

    ecore_hw_prepare(&ha->cdev, &params);

    qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

    QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
        ha, &ha->cdev, &ha->cdev.hwfns[0]);

    return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
    device_t dev;
    int i;

    dev = ha->pci_dev;

    QL_DPRINT2(ha, "enter\n");

    for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
        if (ha->idle_chk[i] != NULL) {
            free(ha->idle_chk[i], M_QLNXBUF);
            ha->idle_chk[i] = NULL;
        }

        if (ha->grcdump[i] != NULL) {
            free(ha->grcdump[i], M_QLNXBUF);
            ha->grcdump[i] = NULL;
        }
    }

    if (ha->flags.callout_init)
        callout_drain(&ha->qlnx_callout);

    if (ha->flags.slowpath_start) {
        qlnx_slowpath_stop(ha);
    }

    if (ha->flags.hw_init)
        ecore_hw_remove(&ha->cdev);

    qlnx_del_cdev(ha);

    if (ha->ifp != NULL)
        ether_ifdetach(ha->ifp);

    qlnx_free_tx_dma_tag(ha);

    qlnx_free_rx_dma_tag(ha);

    qlnx_free_parent_dma_tag(ha);

    if (qlnx_vf_device(ha) != 0) {
        qlnx_destroy_error_recovery_taskqueue(ha);
    }

    for (i = 0; i < ha->num_rss; i++) {
        struct qlnx_fastpath *fp = &ha->fp_array[i];

        if (ha->irq_vec[i].handle) {
            (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                ha->irq_vec[i].handle);
        }

        if (ha->irq_vec[i].irq) {
            (void)bus_release_resource(dev, SYS_RES_IRQ,
                ha->irq_vec[i].irq_rid,
                ha->irq_vec[i].irq);
        }

        qlnx_free_tx_br(ha, fp);
    }
    qlnx_destroy_fp_taskqueues(ha);

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        if (ha->sp_handle[i])
            (void)bus_teardown_intr(dev, ha->sp_irq[i],
                ha->sp_handle[i]);

        if (ha->sp_irq[i])
            (void) bus_release_resource(dev, SYS_RES_IRQ,
                ha->sp_irq_rid[i], ha->sp_irq[i]);
    }

    qlnx_destroy_sp_taskqueues(ha);

    if (ha->msix_count)
        pci_release_msi(dev);

    if (ha->flags.lock_init) {
        mtx_destroy(&ha->hw_lock);
    }

    if (ha->pci_reg)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
            ha->pci_reg);

    if (ha->dbells_size && ha->pci_dbells)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
            ha->pci_dbells);

    if (ha->msix_bar)
        (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
            ha->msix_bar);

    QL_DPRINT2(ha, "exit\n");
    return;
}

static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
    int i;

    if (ha->ifp != NULL)
        ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

    QL_DPRINT2(ha, "enter\n");

    if (qlnx_vf_device(ha) == 0)
        return;

    ha->error_recovery = 1;

    for (i = 0; i < ha->cdev.num_hwfns; i++) {
        qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
        qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
    }

    QL_DPRINT2(ha, "exit\n");

    return;
}
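
/*
 * The sysctl handlers that follow share one pattern: sysctl_handle_int()
 * copies in the user-supplied value, and the action is taken only on a
 * successful write (req->newptr set, no error).  Writing 1 to the dump
 * handler invokes qlnx_trigger_dump(); the Tx/Rx coalesce handlers accept
 * 1-255 microseconds and apply the value to the queues of every fastpath
 * through ecore_set_queue_coalesce().
 */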
1451 1452 static int 1453 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1454 { 1455 int err, ret = 0; 1456 qlnx_host_t *ha; 1457 1458 err = sysctl_handle_int(oidp, &ret, 0, req); 1459 1460 if (err || !req->newptr) 1461 return (err); 1462 1463 if (ret == 1) { 1464 ha = (qlnx_host_t *)arg1; 1465 qlnx_trigger_dump(ha); 1466 } 1467 return (err); 1468 } 1469 1470 static int 1471 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1472 { 1473 int err, i, ret = 0, usecs = 0; 1474 qlnx_host_t *ha; 1475 struct ecore_hwfn *p_hwfn; 1476 struct qlnx_fastpath *fp; 1477 1478 err = sysctl_handle_int(oidp, &usecs, 0, req); 1479 1480 if (err || !req->newptr || !usecs || (usecs > 255)) 1481 return (err); 1482 1483 ha = (qlnx_host_t *)arg1; 1484 1485 if (qlnx_vf_device(ha) == 0) 1486 return (-1); 1487 1488 for (i = 0; i < ha->num_rss; i++) { 1489 1490 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1491 1492 fp = &ha->fp_array[i]; 1493 1494 if (fp->txq[0]->handle != NULL) { 1495 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1496 (uint16_t)usecs, fp->txq[0]->handle); 1497 } 1498 } 1499 1500 if (!ret) 1501 ha->tx_coalesce_usecs = (uint8_t)usecs; 1502 1503 return (err); 1504 } 1505 1506 static int 1507 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1508 { 1509 int err, i, ret = 0, usecs = 0; 1510 qlnx_host_t *ha; 1511 struct ecore_hwfn *p_hwfn; 1512 struct qlnx_fastpath *fp; 1513 1514 err = sysctl_handle_int(oidp, &usecs, 0, req); 1515 1516 if (err || !req->newptr || !usecs || (usecs > 255)) 1517 return (err); 1518 1519 ha = (qlnx_host_t *)arg1; 1520 1521 if (qlnx_vf_device(ha) == 0) 1522 return (-1); 1523 1524 for (i = 0; i < ha->num_rss; i++) { 1525 1526 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1527 1528 fp = &ha->fp_array[i]; 1529 1530 if (fp->rxq->handle != NULL) { 1531 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1532 0, fp->rxq->handle); 1533 } 1534 } 1535 1536 if (!ret) 1537 ha->rx_coalesce_usecs = (uint8_t)usecs; 1538 1539 return (err); 1540 } 1541 1542 static void 1543 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1544 { 1545 struct sysctl_ctx_list *ctx; 1546 struct sysctl_oid_list *children; 1547 struct sysctl_oid *ctx_oid; 1548 1549 ctx = device_get_sysctl_ctx(ha->pci_dev); 1550 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1551 1552 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1553 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); 1554 children = SYSCTL_CHILDREN(ctx_oid); 1555 1556 SYSCTL_ADD_QUAD(ctx, children, 1557 OID_AUTO, "sp_interrupts", 1558 CTLFLAG_RD, &ha->sp_interrupts, 1559 "No. 
of slowpath interrupts"); 1560 1561 return; 1562 } 1563 1564 static void 1565 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1566 { 1567 struct sysctl_ctx_list *ctx; 1568 struct sysctl_oid_list *children; 1569 struct sysctl_oid_list *node_children; 1570 struct sysctl_oid *ctx_oid; 1571 int i, j; 1572 uint8_t name_str[16]; 1573 1574 ctx = device_get_sysctl_ctx(ha->pci_dev); 1575 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1576 1577 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1578 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); 1579 children = SYSCTL_CHILDREN(ctx_oid); 1580 1581 for (i = 0; i < ha->num_rss; i++) { 1582 1583 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1584 snprintf(name_str, sizeof(name_str), "%d", i); 1585 1586 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1587 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); 1588 node_children = SYSCTL_CHILDREN(ctx_oid); 1589 1590 /* Tx Related */ 1591 1592 SYSCTL_ADD_QUAD(ctx, node_children, 1593 OID_AUTO, "tx_pkts_processed", 1594 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1595 "No. of packets processed for transmission"); 1596 1597 SYSCTL_ADD_QUAD(ctx, node_children, 1598 OID_AUTO, "tx_pkts_freed", 1599 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1600 "No. of freed packets"); 1601 1602 SYSCTL_ADD_QUAD(ctx, node_children, 1603 OID_AUTO, "tx_pkts_transmitted", 1604 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1605 "No. of transmitted packets"); 1606 1607 SYSCTL_ADD_QUAD(ctx, node_children, 1608 OID_AUTO, "tx_pkts_completed", 1609 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1610 "No. of transmit completions"); 1611 1612 SYSCTL_ADD_QUAD(ctx, node_children, 1613 OID_AUTO, "tx_non_tso_pkts", 1614 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1615 "No. of non LSO transmited packets"); 1616 1617 #ifdef QLNX_TRACE_PERF_DATA 1618 1619 SYSCTL_ADD_QUAD(ctx, node_children, 1620 OID_AUTO, "tx_pkts_trans_ctx", 1621 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1622 "No. of transmitted packets in transmit context"); 1623 1624 SYSCTL_ADD_QUAD(ctx, node_children, 1625 OID_AUTO, "tx_pkts_compl_ctx", 1626 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1627 "No. of transmit completions in transmit context"); 1628 1629 SYSCTL_ADD_QUAD(ctx, node_children, 1630 OID_AUTO, "tx_pkts_trans_fp", 1631 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1632 "No. of transmitted packets in taskqueue"); 1633 1634 SYSCTL_ADD_QUAD(ctx, node_children, 1635 OID_AUTO, "tx_pkts_compl_fp", 1636 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1637 "No. of transmit completions in taskqueue"); 1638 1639 SYSCTL_ADD_QUAD(ctx, node_children, 1640 OID_AUTO, "tx_pkts_compl_intr", 1641 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1642 "No. of transmit completions in interrupt ctx"); 1643 #endif 1644 1645 SYSCTL_ADD_QUAD(ctx, node_children, 1646 OID_AUTO, "tx_tso_pkts", 1647 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1648 "No. 
of LSO transmited packets"); 1649 1650 SYSCTL_ADD_QUAD(ctx, node_children, 1651 OID_AUTO, "tx_lso_wnd_min_len", 1652 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1653 "tx_lso_wnd_min_len"); 1654 1655 SYSCTL_ADD_QUAD(ctx, node_children, 1656 OID_AUTO, "tx_defrag", 1657 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1658 "tx_defrag"); 1659 1660 SYSCTL_ADD_QUAD(ctx, node_children, 1661 OID_AUTO, "tx_nsegs_gt_elem_left", 1662 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1663 "tx_nsegs_gt_elem_left"); 1664 1665 SYSCTL_ADD_UINT(ctx, node_children, 1666 OID_AUTO, "tx_tso_max_nsegs", 1667 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1668 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1669 1670 SYSCTL_ADD_UINT(ctx, node_children, 1671 OID_AUTO, "tx_tso_min_nsegs", 1672 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1673 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1674 1675 SYSCTL_ADD_UINT(ctx, node_children, 1676 OID_AUTO, "tx_tso_max_pkt_len", 1677 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1678 ha->fp_array[i].tx_tso_max_pkt_len, 1679 "tx_tso_max_pkt_len"); 1680 1681 SYSCTL_ADD_UINT(ctx, node_children, 1682 OID_AUTO, "tx_tso_min_pkt_len", 1683 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1684 ha->fp_array[i].tx_tso_min_pkt_len, 1685 "tx_tso_min_pkt_len"); 1686 1687 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1688 1689 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1690 snprintf(name_str, sizeof(name_str), 1691 "tx_pkts_nseg_%02d", (j+1)); 1692 1693 SYSCTL_ADD_QUAD(ctx, node_children, 1694 OID_AUTO, name_str, CTLFLAG_RD, 1695 &ha->fp_array[i].tx_pkts[j], name_str); 1696 } 1697 1698 #ifdef QLNX_TRACE_PERF_DATA 1699 for (j = 0; j < 18; j++) { 1700 1701 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1702 snprintf(name_str, sizeof(name_str), 1703 "tx_pkts_hist_%02d", (j+1)); 1704 1705 SYSCTL_ADD_QUAD(ctx, node_children, 1706 OID_AUTO, name_str, CTLFLAG_RD, 1707 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1708 } 1709 for (j = 0; j < 5; j++) { 1710 1711 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1712 snprintf(name_str, sizeof(name_str), 1713 "tx_comInt_%02d", (j+1)); 1714 1715 SYSCTL_ADD_QUAD(ctx, node_children, 1716 OID_AUTO, name_str, CTLFLAG_RD, 1717 &ha->fp_array[i].tx_comInt[j], name_str); 1718 } 1719 for (j = 0; j < 18; j++) { 1720 1721 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1722 snprintf(name_str, sizeof(name_str), 1723 "tx_pkts_q_%02d", (j+1)); 1724 1725 SYSCTL_ADD_QUAD(ctx, node_children, 1726 OID_AUTO, name_str, CTLFLAG_RD, 1727 &ha->fp_array[i].tx_pkts_q[j], name_str); 1728 } 1729 #endif 1730 1731 SYSCTL_ADD_QUAD(ctx, node_children, 1732 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1733 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1734 "err_tx_nsegs_gt_elem_left"); 1735 1736 SYSCTL_ADD_QUAD(ctx, node_children, 1737 OID_AUTO, "err_tx_dmamap_create", 1738 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1739 "err_tx_dmamap_create"); 1740 1741 SYSCTL_ADD_QUAD(ctx, node_children, 1742 OID_AUTO, "err_tx_defrag_dmamap_load", 1743 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1744 "err_tx_defrag_dmamap_load"); 1745 1746 SYSCTL_ADD_QUAD(ctx, node_children, 1747 OID_AUTO, "err_tx_non_tso_max_seg", 1748 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1749 "err_tx_non_tso_max_seg"); 1750 1751 SYSCTL_ADD_QUAD(ctx, node_children, 1752 OID_AUTO, "err_tx_dmamap_load", 1753 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1754 "err_tx_dmamap_load"); 1755 1756 SYSCTL_ADD_QUAD(ctx, node_children, 1757 OID_AUTO, 
"err_tx_defrag", 1758 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1759 "err_tx_defrag"); 1760 1761 SYSCTL_ADD_QUAD(ctx, node_children, 1762 OID_AUTO, "err_tx_free_pkt_null", 1763 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1764 "err_tx_free_pkt_null"); 1765 1766 SYSCTL_ADD_QUAD(ctx, node_children, 1767 OID_AUTO, "err_tx_cons_idx_conflict", 1768 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1769 "err_tx_cons_idx_conflict"); 1770 1771 SYSCTL_ADD_QUAD(ctx, node_children, 1772 OID_AUTO, "lro_cnt_64", 1773 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1774 "lro_cnt_64"); 1775 1776 SYSCTL_ADD_QUAD(ctx, node_children, 1777 OID_AUTO, "lro_cnt_128", 1778 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1779 "lro_cnt_128"); 1780 1781 SYSCTL_ADD_QUAD(ctx, node_children, 1782 OID_AUTO, "lro_cnt_256", 1783 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1784 "lro_cnt_256"); 1785 1786 SYSCTL_ADD_QUAD(ctx, node_children, 1787 OID_AUTO, "lro_cnt_512", 1788 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1789 "lro_cnt_512"); 1790 1791 SYSCTL_ADD_QUAD(ctx, node_children, 1792 OID_AUTO, "lro_cnt_1024", 1793 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1794 "lro_cnt_1024"); 1795 1796 /* Rx Related */ 1797 1798 SYSCTL_ADD_QUAD(ctx, node_children, 1799 OID_AUTO, "rx_pkts", 1800 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1801 "No. of received packets"); 1802 1803 SYSCTL_ADD_QUAD(ctx, node_children, 1804 OID_AUTO, "tpa_start", 1805 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1806 "No. of tpa_start packets"); 1807 1808 SYSCTL_ADD_QUAD(ctx, node_children, 1809 OID_AUTO, "tpa_cont", 1810 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1811 "No. of tpa_cont packets"); 1812 1813 SYSCTL_ADD_QUAD(ctx, node_children, 1814 OID_AUTO, "tpa_end", 1815 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1816 "No. of tpa_end packets"); 1817 1818 SYSCTL_ADD_QUAD(ctx, node_children, 1819 OID_AUTO, "err_m_getcl", 1820 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1821 "err_m_getcl"); 1822 1823 SYSCTL_ADD_QUAD(ctx, node_children, 1824 OID_AUTO, "err_m_getjcl", 1825 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1826 "err_m_getjcl"); 1827 1828 SYSCTL_ADD_QUAD(ctx, node_children, 1829 OID_AUTO, "err_rx_hw_errors", 1830 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1831 "err_rx_hw_errors"); 1832 1833 SYSCTL_ADD_QUAD(ctx, node_children, 1834 OID_AUTO, "err_rx_alloc_errors", 1835 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1836 "err_rx_alloc_errors"); 1837 } 1838 1839 return; 1840 } 1841 1842 static void 1843 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1844 { 1845 struct sysctl_ctx_list *ctx; 1846 struct sysctl_oid_list *children; 1847 struct sysctl_oid *ctx_oid; 1848 1849 ctx = device_get_sysctl_ctx(ha->pci_dev); 1850 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1851 1852 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1853 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); 1854 children = SYSCTL_CHILDREN(ctx_oid); 1855 1856 SYSCTL_ADD_QUAD(ctx, children, 1857 OID_AUTO, "no_buff_discards", 1858 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1859 "No. of packets discarded due to lack of buffer"); 1860 1861 SYSCTL_ADD_QUAD(ctx, children, 1862 OID_AUTO, "packet_too_big_discard", 1863 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1864 "No. 
of packets discarded because packet was too big"); 1865 1866 SYSCTL_ADD_QUAD(ctx, children, 1867 OID_AUTO, "ttl0_discard", 1868 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1869 "ttl0_discard"); 1870 1871 SYSCTL_ADD_QUAD(ctx, children, 1872 OID_AUTO, "rx_ucast_bytes", 1873 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1874 "rx_ucast_bytes"); 1875 1876 SYSCTL_ADD_QUAD(ctx, children, 1877 OID_AUTO, "rx_mcast_bytes", 1878 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1879 "rx_mcast_bytes"); 1880 1881 SYSCTL_ADD_QUAD(ctx, children, 1882 OID_AUTO, "rx_bcast_bytes", 1883 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1884 "rx_bcast_bytes"); 1885 1886 SYSCTL_ADD_QUAD(ctx, children, 1887 OID_AUTO, "rx_ucast_pkts", 1888 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1889 "rx_ucast_pkts"); 1890 1891 SYSCTL_ADD_QUAD(ctx, children, 1892 OID_AUTO, "rx_mcast_pkts", 1893 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1894 "rx_mcast_pkts"); 1895 1896 SYSCTL_ADD_QUAD(ctx, children, 1897 OID_AUTO, "rx_bcast_pkts", 1898 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1899 "rx_bcast_pkts"); 1900 1901 SYSCTL_ADD_QUAD(ctx, children, 1902 OID_AUTO, "mftag_filter_discards", 1903 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1904 "mftag_filter_discards"); 1905 1906 SYSCTL_ADD_QUAD(ctx, children, 1907 OID_AUTO, "mac_filter_discards", 1908 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1909 "mac_filter_discards"); 1910 1911 SYSCTL_ADD_QUAD(ctx, children, 1912 OID_AUTO, "tx_ucast_bytes", 1913 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1914 "tx_ucast_bytes"); 1915 1916 SYSCTL_ADD_QUAD(ctx, children, 1917 OID_AUTO, "tx_mcast_bytes", 1918 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1919 "tx_mcast_bytes"); 1920 1921 SYSCTL_ADD_QUAD(ctx, children, 1922 OID_AUTO, "tx_bcast_bytes", 1923 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1924 "tx_bcast_bytes"); 1925 1926 SYSCTL_ADD_QUAD(ctx, children, 1927 OID_AUTO, "tx_ucast_pkts", 1928 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1929 "tx_ucast_pkts"); 1930 1931 SYSCTL_ADD_QUAD(ctx, children, 1932 OID_AUTO, "tx_mcast_pkts", 1933 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1934 "tx_mcast_pkts"); 1935 1936 SYSCTL_ADD_QUAD(ctx, children, 1937 OID_AUTO, "tx_bcast_pkts", 1938 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1939 "tx_bcast_pkts"); 1940 1941 SYSCTL_ADD_QUAD(ctx, children, 1942 OID_AUTO, "tx_err_drop_pkts", 1943 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1944 "tx_err_drop_pkts"); 1945 1946 SYSCTL_ADD_QUAD(ctx, children, 1947 OID_AUTO, "tpa_coalesced_pkts", 1948 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1949 "tpa_coalesced_pkts"); 1950 1951 SYSCTL_ADD_QUAD(ctx, children, 1952 OID_AUTO, "tpa_coalesced_events", 1953 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1954 "tpa_coalesced_events"); 1955 1956 SYSCTL_ADD_QUAD(ctx, children, 1957 OID_AUTO, "tpa_aborts_num", 1958 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1959 "tpa_aborts_num"); 1960 1961 SYSCTL_ADD_QUAD(ctx, children, 1962 OID_AUTO, "tpa_not_coalesced_pkts", 1963 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1964 "tpa_not_coalesced_pkts"); 1965 1966 SYSCTL_ADD_QUAD(ctx, children, 1967 OID_AUTO, "tpa_coalesced_bytes", 1968 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1969 "tpa_coalesced_bytes"); 1970 1971 SYSCTL_ADD_QUAD(ctx, children, 1972 OID_AUTO, "rx_64_byte_packets", 1973 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1974 "rx_64_byte_packets"); 1975 1976 SYSCTL_ADD_QUAD(ctx, children, 1977 OID_AUTO, 
"rx_65_to_127_byte_packets", 1978 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1979 "rx_65_to_127_byte_packets"); 1980 1981 SYSCTL_ADD_QUAD(ctx, children, 1982 OID_AUTO, "rx_128_to_255_byte_packets", 1983 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1984 "rx_128_to_255_byte_packets"); 1985 1986 SYSCTL_ADD_QUAD(ctx, children, 1987 OID_AUTO, "rx_256_to_511_byte_packets", 1988 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1989 "rx_256_to_511_byte_packets"); 1990 1991 SYSCTL_ADD_QUAD(ctx, children, 1992 OID_AUTO, "rx_512_to_1023_byte_packets", 1993 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1994 "rx_512_to_1023_byte_packets"); 1995 1996 SYSCTL_ADD_QUAD(ctx, children, 1997 OID_AUTO, "rx_1024_to_1518_byte_packets", 1998 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1999 "rx_1024_to_1518_byte_packets"); 2000 2001 SYSCTL_ADD_QUAD(ctx, children, 2002 OID_AUTO, "rx_1519_to_1522_byte_packets", 2003 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 2004 "rx_1519_to_1522_byte_packets"); 2005 2006 SYSCTL_ADD_QUAD(ctx, children, 2007 OID_AUTO, "rx_1523_to_2047_byte_packets", 2008 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 2009 "rx_1523_to_2047_byte_packets"); 2010 2011 SYSCTL_ADD_QUAD(ctx, children, 2012 OID_AUTO, "rx_2048_to_4095_byte_packets", 2013 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 2014 "rx_2048_to_4095_byte_packets"); 2015 2016 SYSCTL_ADD_QUAD(ctx, children, 2017 OID_AUTO, "rx_4096_to_9216_byte_packets", 2018 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 2019 "rx_4096_to_9216_byte_packets"); 2020 2021 SYSCTL_ADD_QUAD(ctx, children, 2022 OID_AUTO, "rx_9217_to_16383_byte_packets", 2023 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 2024 "rx_9217_to_16383_byte_packets"); 2025 2026 SYSCTL_ADD_QUAD(ctx, children, 2027 OID_AUTO, "rx_crc_errors", 2028 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 2029 "rx_crc_errors"); 2030 2031 SYSCTL_ADD_QUAD(ctx, children, 2032 OID_AUTO, "rx_mac_crtl_frames", 2033 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 2034 "rx_mac_crtl_frames"); 2035 2036 SYSCTL_ADD_QUAD(ctx, children, 2037 OID_AUTO, "rx_pause_frames", 2038 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 2039 "rx_pause_frames"); 2040 2041 SYSCTL_ADD_QUAD(ctx, children, 2042 OID_AUTO, "rx_pfc_frames", 2043 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 2044 "rx_pfc_frames"); 2045 2046 SYSCTL_ADD_QUAD(ctx, children, 2047 OID_AUTO, "rx_align_errors", 2048 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 2049 "rx_align_errors"); 2050 2051 SYSCTL_ADD_QUAD(ctx, children, 2052 OID_AUTO, "rx_carrier_errors", 2053 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 2054 "rx_carrier_errors"); 2055 2056 SYSCTL_ADD_QUAD(ctx, children, 2057 OID_AUTO, "rx_oversize_packets", 2058 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2059 "rx_oversize_packets"); 2060 2061 SYSCTL_ADD_QUAD(ctx, children, 2062 OID_AUTO, "rx_jabbers", 2063 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2064 "rx_jabbers"); 2065 2066 SYSCTL_ADD_QUAD(ctx, children, 2067 OID_AUTO, "rx_undersize_packets", 2068 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2069 "rx_undersize_packets"); 2070 2071 SYSCTL_ADD_QUAD(ctx, children, 2072 OID_AUTO, "rx_fragments", 2073 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2074 "rx_fragments"); 2075 2076 SYSCTL_ADD_QUAD(ctx, children, 2077 OID_AUTO, "tx_64_byte_packets", 2078 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2079 
"tx_64_byte_packets"); 2080 2081 SYSCTL_ADD_QUAD(ctx, children, 2082 OID_AUTO, "tx_65_to_127_byte_packets", 2083 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2084 "tx_65_to_127_byte_packets"); 2085 2086 SYSCTL_ADD_QUAD(ctx, children, 2087 OID_AUTO, "tx_128_to_255_byte_packets", 2088 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2089 "tx_128_to_255_byte_packets"); 2090 2091 SYSCTL_ADD_QUAD(ctx, children, 2092 OID_AUTO, "tx_256_to_511_byte_packets", 2093 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2094 "tx_256_to_511_byte_packets"); 2095 2096 SYSCTL_ADD_QUAD(ctx, children, 2097 OID_AUTO, "tx_512_to_1023_byte_packets", 2098 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2099 "tx_512_to_1023_byte_packets"); 2100 2101 SYSCTL_ADD_QUAD(ctx, children, 2102 OID_AUTO, "tx_1024_to_1518_byte_packets", 2103 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2104 "tx_1024_to_1518_byte_packets"); 2105 2106 SYSCTL_ADD_QUAD(ctx, children, 2107 OID_AUTO, "tx_1519_to_2047_byte_packets", 2108 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2109 "tx_1519_to_2047_byte_packets"); 2110 2111 SYSCTL_ADD_QUAD(ctx, children, 2112 OID_AUTO, "tx_2048_to_4095_byte_packets", 2113 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2114 "tx_2048_to_4095_byte_packets"); 2115 2116 SYSCTL_ADD_QUAD(ctx, children, 2117 OID_AUTO, "tx_4096_to_9216_byte_packets", 2118 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2119 "tx_4096_to_9216_byte_packets"); 2120 2121 SYSCTL_ADD_QUAD(ctx, children, 2122 OID_AUTO, "tx_9217_to_16383_byte_packets", 2123 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2124 "tx_9217_to_16383_byte_packets"); 2125 2126 SYSCTL_ADD_QUAD(ctx, children, 2127 OID_AUTO, "tx_pause_frames", 2128 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2129 "tx_pause_frames"); 2130 2131 SYSCTL_ADD_QUAD(ctx, children, 2132 OID_AUTO, "tx_pfc_frames", 2133 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2134 "tx_pfc_frames"); 2135 2136 SYSCTL_ADD_QUAD(ctx, children, 2137 OID_AUTO, "tx_lpi_entry_count", 2138 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2139 "tx_lpi_entry_count"); 2140 2141 SYSCTL_ADD_QUAD(ctx, children, 2142 OID_AUTO, "tx_total_collisions", 2143 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2144 "tx_total_collisions"); 2145 2146 SYSCTL_ADD_QUAD(ctx, children, 2147 OID_AUTO, "brb_truncates", 2148 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2149 "brb_truncates"); 2150 2151 SYSCTL_ADD_QUAD(ctx, children, 2152 OID_AUTO, "brb_discards", 2153 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2154 "brb_discards"); 2155 2156 SYSCTL_ADD_QUAD(ctx, children, 2157 OID_AUTO, "rx_mac_bytes", 2158 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2159 "rx_mac_bytes"); 2160 2161 SYSCTL_ADD_QUAD(ctx, children, 2162 OID_AUTO, "rx_mac_uc_packets", 2163 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2164 "rx_mac_uc_packets"); 2165 2166 SYSCTL_ADD_QUAD(ctx, children, 2167 OID_AUTO, "rx_mac_mc_packets", 2168 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2169 "rx_mac_mc_packets"); 2170 2171 SYSCTL_ADD_QUAD(ctx, children, 2172 OID_AUTO, "rx_mac_bc_packets", 2173 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2174 "rx_mac_bc_packets"); 2175 2176 SYSCTL_ADD_QUAD(ctx, children, 2177 OID_AUTO, "rx_mac_frames_ok", 2178 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2179 "rx_mac_frames_ok"); 2180 2181 SYSCTL_ADD_QUAD(ctx, children, 2182 OID_AUTO, "tx_mac_bytes", 2183 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2184 "tx_mac_bytes"); 2185 2186 SYSCTL_ADD_QUAD(ctx, children, 2187 OID_AUTO, "tx_mac_uc_packets", 2188 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2189 "tx_mac_uc_packets"); 2190 2191 SYSCTL_ADD_QUAD(ctx, children, 2192 OID_AUTO, "tx_mac_mc_packets", 2193 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2194 "tx_mac_mc_packets"); 2195 2196 SYSCTL_ADD_QUAD(ctx, children, 2197 OID_AUTO, "tx_mac_bc_packets", 2198 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2199 "tx_mac_bc_packets"); 2200 2201 SYSCTL_ADD_QUAD(ctx, children, 2202 OID_AUTO, "tx_mac_ctrl_frames", 2203 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2204 "tx_mac_ctrl_frames"); 2205 return; 2206 } 2207 2208 static void 2209 qlnx_add_sysctls(qlnx_host_t *ha) 2210 { 2211 device_t dev = ha->pci_dev; 2212 struct sysctl_ctx_list *ctx; 2213 struct sysctl_oid_list *children; 2214 2215 ctx = device_get_sysctl_ctx(dev); 2216 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2217 2218 qlnx_add_fp_stats_sysctls(ha); 2219 qlnx_add_sp_stats_sysctls(ha); 2220 2221 if (qlnx_vf_device(ha) != 0) 2222 qlnx_add_hw_stats_sysctls(ha); 2223 2224 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2225 CTLFLAG_RD, qlnx_ver_str, 0, 2226 "Driver Version"); 2227 2228 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2229 CTLFLAG_RD, ha->stormfw_ver, 0, 2230 "STORM Firmware Version"); 2231 2232 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2233 CTLFLAG_RD, ha->mfw_ver, 0, 2234 "Management Firmware Version"); 2235 2236 SYSCTL_ADD_UINT(ctx, children, 2237 OID_AUTO, "personality", CTLFLAG_RD, 2238 &ha->personality, ha->personality, 2239 "\tpersonality = 0 => Ethernet Only\n" 2240 "\tpersonality = 3 => Ethernet and RoCE\n" 2241 "\tpersonality = 4 => Ethernet and iWARP\n" 2242 "\tpersonality = 6 => Default in Shared Memory\n"); 2243 2244 ha->dbg_level = 0; 2245 SYSCTL_ADD_UINT(ctx, children, 2246 OID_AUTO, "debug", CTLFLAG_RW, 2247 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2248 2249 ha->dp_level = 0x01; 2250 SYSCTL_ADD_UINT(ctx, children, 2251 OID_AUTO, "dp_level", CTLFLAG_RW, 2252 &ha->dp_level, ha->dp_level, "DP Level"); 2253 2254 ha->dbg_trace_lro_cnt = 0; 2255 SYSCTL_ADD_UINT(ctx, children, 2256 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2257 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2258 "Trace LRO Counts"); 2259 2260 ha->dbg_trace_tso_pkt_len = 0; 2261 SYSCTL_ADD_UINT(ctx, children, 2262 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2263 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2264 "Trace TSO packet lengths"); 2265 2266 ha->dp_module = 0; 2267 SYSCTL_ADD_UINT(ctx, children, 2268 OID_AUTO, "dp_module", CTLFLAG_RW, 2269 &ha->dp_module, ha->dp_module, "DP Module"); 2270 2271 ha->err_inject = 0; 2272 2273 SYSCTL_ADD_UINT(ctx, children, 2274 OID_AUTO, "err_inject", CTLFLAG_RW, 2275 &ha->err_inject, ha->err_inject, "Error Inject"); 2276 2277 ha->storm_stats_enable = 0; 2278 2279 SYSCTL_ADD_UINT(ctx, children, 2280 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2281 &ha->storm_stats_enable, ha->storm_stats_enable, 2282 "Enable Storm Statistics Gathering"); 2283 2284 ha->storm_stats_index = 0; 2285 2286 SYSCTL_ADD_UINT(ctx, children, 2287 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2288 &ha->storm_stats_index, ha->storm_stats_index, 2289 "Enable Storm Statistics Gathering Current Index"); 2290 2291 ha->grcdump_taken = 0; 2292 SYSCTL_ADD_UINT(ctx, children, 2293 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2294 &ha->grcdump_taken, ha->grcdump_taken, 
2295 "grcdump_taken"); 2296 2297 ha->idle_chk_taken = 0; 2298 SYSCTL_ADD_UINT(ctx, children, 2299 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2300 &ha->idle_chk_taken, ha->idle_chk_taken, 2301 "idle_chk_taken"); 2302 2303 SYSCTL_ADD_UINT(ctx, children, 2304 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2305 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2306 "rx_coalesce_usecs"); 2307 2308 SYSCTL_ADD_UINT(ctx, children, 2309 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2310 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2311 "tx_coalesce_usecs"); 2312 2313 SYSCTL_ADD_PROC(ctx, children, 2314 OID_AUTO, "trigger_dump", 2315 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2316 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2317 2318 SYSCTL_ADD_PROC(ctx, children, 2319 OID_AUTO, "set_rx_coalesce_usecs", 2320 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2321 (void *)ha, 0, qlnx_set_rx_coalesce, "I", 2322 "rx interrupt coalesce period microseconds"); 2323 2324 SYSCTL_ADD_PROC(ctx, children, 2325 OID_AUTO, "set_tx_coalesce_usecs", 2326 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2327 (void *)ha, 0, qlnx_set_tx_coalesce, "I", 2328 "tx interrupt coalesce period microseconds"); 2329 2330 ha->rx_pkt_threshold = 128; 2331 SYSCTL_ADD_UINT(ctx, children, 2332 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2333 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2334 "No. of Rx Pkts to process at a time"); 2335 2336 ha->rx_jumbo_buf_eq_mtu = 0; 2337 SYSCTL_ADD_UINT(ctx, children, 2338 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2339 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2340 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2341 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2342 2343 SYSCTL_ADD_QUAD(ctx, children, 2344 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2345 &ha->err_illegal_intr, "err_illegal_intr"); 2346 2347 SYSCTL_ADD_QUAD(ctx, children, 2348 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2349 &ha->err_fp_null, "err_fp_null"); 2350 2351 SYSCTL_ADD_QUAD(ctx, children, 2352 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2353 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2354 return; 2355 } 2356 2357 2358 2359 /***************************************************************************** 2360 * Operating System Network Interface Functions 2361 *****************************************************************************/ 2362 2363 static void 2364 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2365 { 2366 uint16_t device_id; 2367 struct ifnet *ifp; 2368 2369 ifp = ha->ifp = if_alloc(IFT_ETHER); 2370 2371 if (ifp == NULL) 2372 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2373 2374 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2375 2376 device_id = pci_get_device(ha->pci_dev); 2377 2378 #if __FreeBSD_version >= 1000000 2379 2380 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2381 ifp->if_baudrate = IF_Gbps(40); 2382 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2383 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2384 ifp->if_baudrate = IF_Gbps(25); 2385 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2386 ifp->if_baudrate = IF_Gbps(50); 2387 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2388 ifp->if_baudrate = IF_Gbps(100); 2389 2390 ifp->if_capabilities = IFCAP_LINKSTATE; 2391 #else 2392 ifp->if_mtu = ETHERMTU; 2393 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 2394 2395 #endif /* #if __FreeBSD_version >= 1000000 */ 2396 2397 ifp->if_init = qlnx_init; 2398 ifp->if_softc = ha; 2399 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | 
IFF_MULTICAST; 2400 ifp->if_ioctl = qlnx_ioctl; 2401 ifp->if_transmit = qlnx_transmit; 2402 ifp->if_qflush = qlnx_qflush; 2403 2404 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 2405 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 2406 IFQ_SET_READY(&ifp->if_snd); 2407 2408 #if __FreeBSD_version >= 1100036 2409 if_setgetcounterfn(ifp, qlnx_get_counter); 2410 #endif 2411 2412 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2413 2414 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2415 2416 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2417 !ha->primary_mac[2] && !ha->primary_mac[3] && 2418 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2419 uint32_t rnd; 2420 2421 rnd = arc4random(); 2422 2423 ha->primary_mac[0] = 0x00; 2424 ha->primary_mac[1] = 0x0e; 2425 ha->primary_mac[2] = 0x1e; 2426 ha->primary_mac[3] = rnd & 0xFF; 2427 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2428 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2429 } 2430 2431 ether_ifattach(ifp, ha->primary_mac); 2432 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2433 2434 ifp->if_capabilities = IFCAP_HWCSUM; 2435 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2436 2437 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2438 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 2439 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2440 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2441 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 2442 ifp->if_capabilities |= IFCAP_TSO4; 2443 ifp->if_capabilities |= IFCAP_TSO6; 2444 ifp->if_capabilities |= IFCAP_LRO; 2445 2446 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE - 2447 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2448 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */; 2449 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE; 2450 2451 2452 ifp->if_capenable = ifp->if_capabilities; 2453 2454 ifp->if_hwassist = CSUM_IP; 2455 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 2456 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 2457 ifp->if_hwassist |= CSUM_TSO; 2458 2459 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2460 2461 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2462 qlnx_media_status); 2463 2464 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2465 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2466 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2467 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2468 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2469 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2470 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2471 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2472 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2473 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2474 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2475 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2476 ifmedia_add(&ha->media, 2477 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2478 ifmedia_add(&ha->media, 2479 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2480 ifmedia_add(&ha->media, 2481 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2482 } 2483 2484 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2485 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2486 2487 2488 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2489 2490 QL_DPRINT2(ha, "exit\n"); 2491 2492 return; 2493 } 2494 2495 static void 2496 qlnx_init_locked(qlnx_host_t *ha) 2497 { 2498 struct ifnet *ifp = ha->ifp; 2499 2500 QL_DPRINT1(ha, "Driver 
Initialization start \n"); 2501 2502 qlnx_stop(ha); 2503 2504 if (qlnx_load(ha) == 0) { 2505 2506 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2507 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2508 2509 #ifdef QLNX_ENABLE_IWARP 2510 if (qlnx_vf_device(ha) != 0) { 2511 qlnx_rdma_dev_open(ha); 2512 } 2513 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2514 } 2515 2516 return; 2517 } 2518 2519 static void 2520 qlnx_init(void *arg) 2521 { 2522 qlnx_host_t *ha; 2523 2524 ha = (qlnx_host_t *)arg; 2525 2526 QL_DPRINT2(ha, "enter\n"); 2527 2528 QLNX_LOCK(ha); 2529 qlnx_init_locked(ha); 2530 QLNX_UNLOCK(ha); 2531 2532 QL_DPRINT2(ha, "exit\n"); 2533 2534 return; 2535 } 2536 2537 static int 2538 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2539 { 2540 struct ecore_filter_mcast *mcast; 2541 struct ecore_dev *cdev; 2542 int rc; 2543 2544 cdev = &ha->cdev; 2545 2546 mcast = &ha->ecore_mcast; 2547 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2548 2549 if (add_mac) 2550 mcast->opcode = ECORE_FILTER_ADD; 2551 else 2552 mcast->opcode = ECORE_FILTER_REMOVE; 2553 2554 mcast->num_mc_addrs = 1; 2555 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2556 2557 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2558 2559 return (rc); 2560 } 2561 2562 static int 2563 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2564 { 2565 int i; 2566 2567 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2568 2569 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2570 return 0; /* it has already been added */ 2571 } 2572 2573 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2574 2575 if ((ha->mcast[i].addr[0] == 0) && 2576 (ha->mcast[i].addr[1] == 0) && 2577 (ha->mcast[i].addr[2] == 0) && 2578 (ha->mcast[i].addr[3] == 0) && 2579 (ha->mcast[i].addr[4] == 0) && 2580 (ha->mcast[i].addr[5] == 0)) { 2581 2582 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2583 return (-1); 2584 2585 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2586 ha->nmcast++; 2587 2588 return 0; 2589 } 2590 } 2591 return 0; 2592 } 2593 2594 static int 2595 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2596 { 2597 int i; 2598 2599 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2600 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2601 2602 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2603 return (-1); 2604 2605 ha->mcast[i].addr[0] = 0; 2606 ha->mcast[i].addr[1] = 0; 2607 ha->mcast[i].addr[2] = 0; 2608 ha->mcast[i].addr[3] = 0; 2609 ha->mcast[i].addr[4] = 0; 2610 ha->mcast[i].addr[5] = 0; 2611 2612 ha->nmcast--; 2613 2614 return 0; 2615 } 2616 } 2617 return 0; 2618 } 2619 2620 /* 2621 * Name: qlnx_hw_set_multi 2622 * Function: Sets the Multicast Addresses provided by the host O.S. into the 2623 * hardware (for the given interface) 2624 */ 2625 static void 2626 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2627 uint32_t add_mac) 2628 { 2629 int i; 2630 2631 for (i = 0; i < mcnt; i++) { 2632 if (add_mac) { 2633 if (qlnx_hw_add_mcast(ha, mta)) 2634 break; 2635 } else { 2636 if (qlnx_hw_del_mcast(ha, mta)) 2637 break; 2638 } 2639 2640 mta += ETHER_HDR_LEN; 2641 } 2642 return; 2643 } 2644 2645 2646 static u_int 2647 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 2648 { 2649 uint8_t *mta = arg; 2650 2651 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2652 return (0); 2653 2654 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2655 2656 return (1); 2657 } 2658 2659 static int 2660 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2661 { 2662 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; 2663
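	/*
	 * mta[] holds the interface's link-level multicast list as gathered
	 * by qlnx_copy_maddr(): each slot is ETHER_HDR_LEN bytes wide, and
	 * only the first ETH_ALEN bytes of a slot are consumed as a MAC
	 * address by qlnx_hw_set_multi()/qlnx_config_mcast_mac_addr(), which
	 * walk the table with the same ETHER_HDR_LEN stride.
	 */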
struct ifnet *ifp = ha->ifp; 2664 u_int mcnt; 2665 2666 if (qlnx_vf_device(ha) == 0) 2667 return (0); 2668 2669 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); 2670 2671 QLNX_LOCK(ha); 2672 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2673 QLNX_UNLOCK(ha); 2674 2675 return (0); 2676 } 2677 2678 static int 2679 qlnx_set_promisc(qlnx_host_t *ha) 2680 { 2681 int rc = 0; 2682 uint8_t filter; 2683 2684 if (qlnx_vf_device(ha) == 0) 2685 return (0); 2686 2687 filter = ha->filter; 2688 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2689 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2690 2691 rc = qlnx_set_rx_accept_filter(ha, filter); 2692 return (rc); 2693 } 2694 2695 static int 2696 qlnx_set_allmulti(qlnx_host_t *ha) 2697 { 2698 int rc = 0; 2699 uint8_t filter; 2700 2701 if (qlnx_vf_device(ha) == 0) 2702 return (0); 2703 2704 filter = ha->filter; 2705 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2706 rc = qlnx_set_rx_accept_filter(ha, filter); 2707 2708 return (rc); 2709 } 2710 2711 2712 static int 2713 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2714 { 2715 int ret = 0, mask; 2716 struct ifreq *ifr = (struct ifreq *)data; 2717 struct ifaddr *ifa = (struct ifaddr *)data; 2718 qlnx_host_t *ha; 2719 2720 ha = (qlnx_host_t *)ifp->if_softc; 2721 2722 switch (cmd) { 2723 case SIOCSIFADDR: 2724 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2725 2726 if (ifa->ifa_addr->sa_family == AF_INET) { 2727 ifp->if_flags |= IFF_UP; 2728 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2729 QLNX_LOCK(ha); 2730 qlnx_init_locked(ha); 2731 QLNX_UNLOCK(ha); 2732 } 2733 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2734 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2735 2736 arp_ifinit(ifp, ifa); 2737 } else { 2738 ether_ioctl(ifp, cmd, data); 2739 } 2740 break; 2741 2742 case SIOCSIFMTU: 2743 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2744 2745 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2746 ret = EINVAL; 2747 } else { 2748 QLNX_LOCK(ha); 2749 ifp->if_mtu = ifr->ifr_mtu; 2750 ha->max_frame_size = 2751 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2752 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2753 qlnx_init_locked(ha); 2754 } 2755 2756 QLNX_UNLOCK(ha); 2757 } 2758 2759 break; 2760 2761 case SIOCSIFFLAGS: 2762 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2763 2764 QLNX_LOCK(ha); 2765 2766 if (ifp->if_flags & IFF_UP) { 2767 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2768 if ((ifp->if_flags ^ ha->if_flags) & 2769 IFF_PROMISC) { 2770 ret = qlnx_set_promisc(ha); 2771 } else if ((ifp->if_flags ^ ha->if_flags) & 2772 IFF_ALLMULTI) { 2773 ret = qlnx_set_allmulti(ha); 2774 } 2775 } else { 2776 ha->max_frame_size = ifp->if_mtu + 2777 ETHER_HDR_LEN + ETHER_CRC_LEN; 2778 qlnx_init_locked(ha); 2779 } 2780 } else { 2781 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2782 qlnx_stop(ha); 2783 ha->if_flags = ifp->if_flags; 2784 } 2785 2786 QLNX_UNLOCK(ha); 2787 break; 2788 2789 case SIOCADDMULTI: 2790 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2791 2792 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2793 if (qlnx_set_multi(ha, 1)) 2794 ret = EINVAL; 2795 } 2796 break; 2797 2798 case SIOCDELMULTI: 2799 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2800 2801 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2802 if (qlnx_set_multi(ha, 0)) 2803 ret = EINVAL; 2804 } 2805 break; 2806 2807 case SIOCSIFMEDIA: 2808 case SIOCGIFMEDIA: 2809 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2810 2811 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2812 break; 2813 2814 case SIOCSIFCAP: 2815 2816 mask = ifr->ifr_reqcap ^ 
ifp->if_capenable; 2817 2818 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2819 2820 if (mask & IFCAP_HWCSUM) 2821 ifp->if_capenable ^= IFCAP_HWCSUM; 2822 if (mask & IFCAP_TSO4) 2823 ifp->if_capenable ^= IFCAP_TSO4; 2824 if (mask & IFCAP_TSO6) 2825 ifp->if_capenable ^= IFCAP_TSO6; 2826 if (mask & IFCAP_VLAN_HWTAGGING) 2827 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2828 if (mask & IFCAP_VLAN_HWTSO) 2829 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2830 if (mask & IFCAP_LRO) 2831 ifp->if_capenable ^= IFCAP_LRO; 2832 2833 QLNX_LOCK(ha); 2834 2835 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2836 qlnx_init_locked(ha); 2837 2838 QLNX_UNLOCK(ha); 2839 2840 VLAN_CAPABILITIES(ifp); 2841 break; 2842 2843 #if (__FreeBSD_version >= 1100101) 2844 2845 case SIOCGI2C: 2846 { 2847 struct ifi2creq i2c; 2848 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2849 struct ecore_ptt *p_ptt; 2850 2851 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2852 2853 if (ret) 2854 break; 2855 2856 if ((i2c.len > sizeof (i2c.data)) || 2857 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2858 ret = EINVAL; 2859 break; 2860 } 2861 2862 p_ptt = ecore_ptt_acquire(p_hwfn); 2863 2864 if (!p_ptt) { 2865 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2866 ret = -1; 2867 break; 2868 } 2869 2870 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2871 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2872 i2c.len, &i2c.data[0]); 2873 2874 ecore_ptt_release(p_hwfn, p_ptt); 2875 2876 if (ret) { 2877 ret = -1; 2878 break; 2879 } 2880 2881 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2882 2883 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2884 len = %d addr = 0x%02x offset = 0x%04x \ 2885 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2886 0x%02x 0x%02x 0x%02x\n", 2887 ret, i2c.len, i2c.dev_addr, i2c.offset, 2888 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2889 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2890 break; 2891 } 2892 #endif /* #if (__FreeBSD_version >= 1100101) */ 2893 2894 default: 2895 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2896 ret = ether_ioctl(ifp, cmd, data); 2897 break; 2898 } 2899 2900 return (ret); 2901 } 2902 2903 static int 2904 qlnx_media_change(struct ifnet *ifp) 2905 { 2906 qlnx_host_t *ha; 2907 struct ifmedia *ifm; 2908 int ret = 0; 2909 2910 ha = (qlnx_host_t *)ifp->if_softc; 2911 2912 QL_DPRINT2(ha, "enter\n"); 2913 2914 ifm = &ha->media; 2915 2916 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2917 ret = EINVAL; 2918 2919 QL_DPRINT2(ha, "exit\n"); 2920 2921 return (ret); 2922 } 2923 2924 static void 2925 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2926 { 2927 qlnx_host_t *ha; 2928 2929 ha = (qlnx_host_t *)ifp->if_softc; 2930 2931 QL_DPRINT2(ha, "enter\n"); 2932 2933 ifmr->ifm_status = IFM_AVALID; 2934 ifmr->ifm_active = IFM_ETHER; 2935 2936 if (ha->link_up) { 2937 ifmr->ifm_status |= IFM_ACTIVE; 2938 ifmr->ifm_active |= 2939 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2940 2941 if (ha->if_link.link_partner_caps & 2942 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2943 ifmr->ifm_active |= 2944 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2945 } 2946 2947 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2948 2949 return; 2950 } 2951 2952 2953 static void 2954 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2955 struct qlnx_tx_queue *txq) 2956 { 2957 u16 idx; 2958 struct mbuf *mp; 2959 bus_dmamap_t map; 2960 int i; 2961 struct eth_tx_bd *tx_data_bd; 2962 struct eth_tx_1st_bd *first_bd; 2963 int nbds = 0; 2964 2965 idx = txq->sw_tx_cons; 2966 mp = txq->sw_tx_ring[idx].mp; 2967 map = txq->sw_tx_ring[idx].map; 2968 2969 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2970 2971 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2972 2973 QL_DPRINT1(ha, "(mp == NULL) " 2974 " tx_idx = 0x%x" 2975 " ecore_prod_idx = 0x%x" 2976 " ecore_cons_idx = 0x%x" 2977 " hw_bd_cons = 0x%x" 2978 " txq_db_last = 0x%x" 2979 " elem_left = 0x%x\n", 2980 fp->rss_id, 2981 ecore_chain_get_prod_idx(&txq->tx_pbl), 2982 ecore_chain_get_cons_idx(&txq->tx_pbl), 2983 le16toh(*txq->hw_cons_ptr), 2984 txq->tx_db.raw, 2985 ecore_chain_get_elem_left(&txq->tx_pbl)); 2986 2987 fp->err_tx_free_pkt_null++; 2988 2989 //DEBUG 2990 qlnx_trigger_dump(ha); 2991 2992 return; 2993 } else { 2994 2995 QLNX_INC_OPACKETS((ha->ifp)); 2996 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2997 2998 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2999 bus_dmamap_unload(ha->tx_tag, map); 3000 3001 fp->tx_pkts_freed++; 3002 fp->tx_pkts_completed++; 3003 3004 m_freem(mp); 3005 } 3006 3007 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 3008 nbds = first_bd->data.nbds; 3009 3010 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 3011 3012 for (i = 1; i < nbds; i++) { 3013 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 3014 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 3015 } 3016 txq->sw_tx_ring[idx].flags = 0; 3017 txq->sw_tx_ring[idx].mp = NULL; 3018 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 3019 3020 return; 3021 } 3022 3023 static void 3024 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3025 struct qlnx_tx_queue *txq) 3026 { 3027 u16 hw_bd_cons; 3028 u16 ecore_cons_idx; 3029 uint16_t diff; 3030 uint16_t idx, idx2; 3031 3032 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 3033 3034 while (hw_bd_cons != 3035 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 3036 3037 if (hw_bd_cons < ecore_cons_idx) { 3038 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 3039 } else { 3040 diff = hw_bd_cons - ecore_cons_idx; 3041 } 3042 if ((diff > TX_RING_SIZE) || 3043 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 3044 3045 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 3046 3047 QL_DPRINT1(ha, "(diff = 0x%x) " 3048 " tx_idx = 0x%x" 3049 " ecore_prod_idx = 0x%x" 3050 " ecore_cons_idx = 0x%x" 3051 " hw_bd_cons = 0x%x" 3052 " txq_db_last = 0x%x" 3053 " elem_left = 0x%x\n", 3054 diff, 3055 fp->rss_id, 3056 ecore_chain_get_prod_idx(&txq->tx_pbl), 3057 ecore_chain_get_cons_idx(&txq->tx_pbl), 3058 le16toh(*txq->hw_cons_ptr), 3059 txq->tx_db.raw, 3060 ecore_chain_get_elem_left(&txq->tx_pbl)); 3061 3062 fp->err_tx_cons_idx_conflict++; 3063 3064 //DEBUG 3065 qlnx_trigger_dump(ha); 3066 } 3067 3068 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3069 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 3070 prefetch(txq->sw_tx_ring[idx].mp); 3071 prefetch(txq->sw_tx_ring[idx2].mp); 3072 3073 qlnx_free_tx_pkt(ha, fp, txq); 3074 3075 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3076 } 3077 return; 3078 } 3079 3080 static int 3081 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp) 3082 { 3083 int ret = 0; 3084 struct 
qlnx_tx_queue *txq; 3085 qlnx_host_t * ha; 3086 uint16_t elem_left; 3087 3088 txq = fp->txq[0]; 3089 ha = (qlnx_host_t *)fp->edev; 3090 3091 3092 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3093 if(mp != NULL) 3094 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3095 return (ret); 3096 } 3097 3098 if(mp != NULL) 3099 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3100 3101 mp = drbr_peek(ifp, fp->tx_br); 3102 3103 while (mp != NULL) { 3104 3105 if (qlnx_send(ha, fp, &mp)) { 3106 3107 if (mp != NULL) { 3108 drbr_putback(ifp, fp->tx_br, mp); 3109 } else { 3110 fp->tx_pkts_processed++; 3111 drbr_advance(ifp, fp->tx_br); 3112 } 3113 goto qlnx_transmit_locked_exit; 3114 3115 } else { 3116 drbr_advance(ifp, fp->tx_br); 3117 fp->tx_pkts_transmitted++; 3118 fp->tx_pkts_processed++; 3119 } 3120 3121 mp = drbr_peek(ifp, fp->tx_br); 3122 } 3123 3124 qlnx_transmit_locked_exit: 3125 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3126 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3127 < QLNX_TX_ELEM_MAX_THRESH)) 3128 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3129 3130 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3131 return ret; 3132 } 3133 3134 3135 static int 3136 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 3137 { 3138 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 3139 struct qlnx_fastpath *fp; 3140 int rss_id = 0, ret = 0; 3141 3142 #ifdef QLNX_TRACEPERF_DATA 3143 uint64_t tx_pkts = 0, tx_compl = 0; 3144 #endif 3145 3146 QL_DPRINT2(ha, "enter\n"); 3147 3148 #if __FreeBSD_version >= 1100000 3149 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3150 #else 3151 if (mp->m_flags & M_FLOWID) 3152 #endif 3153 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3154 ha->num_rss; 3155 3156 fp = &ha->fp_array[rss_id]; 3157 3158 if (fp->tx_br == NULL) { 3159 ret = EINVAL; 3160 goto qlnx_transmit_exit; 3161 } 3162 3163 if (mtx_trylock(&fp->tx_mtx)) { 3164 3165 #ifdef QLNX_TRACEPERF_DATA 3166 tx_pkts = fp->tx_pkts_transmitted; 3167 tx_compl = fp->tx_pkts_completed; 3168 #endif 3169 3170 ret = qlnx_transmit_locked(ifp, fp, mp); 3171 3172 #ifdef QLNX_TRACEPERF_DATA 3173 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3174 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3175 #endif 3176 mtx_unlock(&fp->tx_mtx); 3177 } else { 3178 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3179 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3180 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3181 } 3182 } 3183 3184 qlnx_transmit_exit: 3185 3186 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3187 return ret; 3188 } 3189 3190 static void 3191 qlnx_qflush(struct ifnet *ifp) 3192 { 3193 int rss_id; 3194 struct qlnx_fastpath *fp; 3195 struct mbuf *mp; 3196 qlnx_host_t *ha; 3197 3198 ha = (qlnx_host_t *)ifp->if_softc; 3199 3200 QL_DPRINT2(ha, "enter\n"); 3201 3202 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3203 3204 fp = &ha->fp_array[rss_id]; 3205 3206 if (fp == NULL) 3207 continue; 3208 3209 if (fp->tx_br) { 3210 mtx_lock(&fp->tx_mtx); 3211 3212 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3213 fp->tx_pkts_freed++; 3214 m_freem(mp); 3215 } 3216 mtx_unlock(&fp->tx_mtx); 3217 } 3218 } 3219 QL_DPRINT2(ha, "exit\n"); 3220 3221 return; 3222 } 3223 3224 static void 3225 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3226 { 3227 struct ecore_dev *cdev; 3228 uint32_t offset; 3229 3230 cdev = &ha->cdev; 3231 3232 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3233 3234 bus_write_4(ha->pci_dbells, offset, 
value); 3235 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3236 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3237 3238 return; 3239 } 3240 3241 static uint32_t 3242 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3243 { 3244 struct ether_vlan_header *eh = NULL; 3245 struct ip *ip = NULL; 3246 struct ip6_hdr *ip6 = NULL; 3247 struct tcphdr *th = NULL; 3248 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3249 uint16_t etype = 0; 3250 device_t dev; 3251 uint8_t buf[sizeof(struct ip6_hdr)]; 3252 3253 dev = ha->pci_dev; 3254 3255 eh = mtod(mp, struct ether_vlan_header *); 3256 3257 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3258 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3259 etype = ntohs(eh->evl_proto); 3260 } else { 3261 ehdrlen = ETHER_HDR_LEN; 3262 etype = ntohs(eh->evl_encap_proto); 3263 } 3264 3265 switch (etype) { 3266 3267 case ETHERTYPE_IP: 3268 ip = (struct ip *)(mp->m_data + ehdrlen); 3269 3270 ip_hlen = sizeof (struct ip); 3271 3272 if (mp->m_len < (ehdrlen + ip_hlen)) { 3273 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3274 ip = (struct ip *)buf; 3275 } 3276 3277 th = (struct tcphdr *)(ip + 1); 3278 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3279 break; 3280 3281 case ETHERTYPE_IPV6: 3282 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3283 3284 ip_hlen = sizeof(struct ip6_hdr); 3285 3286 if (mp->m_len < (ehdrlen + ip_hlen)) { 3287 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3288 buf); 3289 ip6 = (struct ip6_hdr *)buf; 3290 } 3291 th = (struct tcphdr *)(ip6 + 1); 3292 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3293 break; 3294 3295 default: 3296 break; 3297 } 3298 3299 return (offset); 3300 } 3301 3302 static __inline int 3303 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3304 uint32_t offset) 3305 { 3306 int i; 3307 uint32_t sum, nbds_in_hdr = 1; 3308 uint32_t window; 3309 bus_dma_segment_t *s_seg; 3310 3311 /* If the header spans mulitple segments, skip those segments */ 3312 3313 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3314 return (0); 3315 3316 i = 0; 3317 3318 while ((i < nsegs) && (offset >= segs->ds_len)) { 3319 offset = offset - segs->ds_len; 3320 segs++; 3321 i++; 3322 nbds_in_hdr++; 3323 } 3324 3325 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3326 3327 nsegs = nsegs - i; 3328 3329 while (nsegs >= window) { 3330 3331 sum = 0; 3332 s_seg = segs; 3333 3334 for (i = 0; i < window; i++){ 3335 sum += s_seg->ds_len; 3336 s_seg++; 3337 } 3338 3339 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3340 fp->tx_lso_wnd_min_len++; 3341 return (-1); 3342 } 3343 3344 nsegs = nsegs - 1; 3345 segs++; 3346 } 3347 3348 return (0); 3349 } 3350 3351 static int 3352 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3353 { 3354 bus_dma_segment_t *segs; 3355 bus_dmamap_t map = 0; 3356 uint32_t nsegs = 0; 3357 int ret = -1; 3358 struct mbuf *m_head = *m_headp; 3359 uint16_t idx = 0; 3360 uint16_t elem_left; 3361 3362 uint8_t nbd = 0; 3363 struct qlnx_tx_queue *txq; 3364 3365 struct eth_tx_1st_bd *first_bd; 3366 struct eth_tx_2nd_bd *second_bd; 3367 struct eth_tx_3rd_bd *third_bd; 3368 struct eth_tx_bd *tx_data_bd; 3369 3370 int seg_idx = 0; 3371 uint32_t nbds_in_hdr = 0; 3372 uint32_t offset = 0; 3373 3374 #ifdef QLNX_TRACE_PERF_DATA 3375 uint16_t bd_used; 3376 #endif 3377 3378 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3379 3380 if (!ha->link_up) 3381 return (-1); 3382 3383 first_bd = NULL; 3384 second_bd = NULL; 3385 third_bd = NULL; 3386 tx_data_bd = NULL; 3387 3388 txq = fp->txq[0]; 
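	/*
	 * Transmit path overview: verify the BD chain has room, DMA-map the
	 * mbuf chain (falling back to m_defrag() and a re-map when it is too
	 * fragmented, or when a TSO header layout fails qlnx_tso_check()),
	 * then build the first/second/third BDs plus one BD per remaining
	 * segment and ring the doorbell via qlnx_txq_doorbell_wr32().
	 */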
3389 3390 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3391 QLNX_TX_ELEM_MIN_THRESH) { 3392 3393 fp->tx_nsegs_gt_elem_left++; 3394 fp->err_tx_nsegs_gt_elem_left++; 3395 3396 return (ENOBUFS); 3397 } 3398 3399 idx = txq->sw_tx_prod; 3400 3401 map = txq->sw_tx_ring[idx].map; 3402 segs = txq->segs; 3403 3404 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3405 BUS_DMA_NOWAIT); 3406 3407 if (ha->dbg_trace_tso_pkt_len) { 3408 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3409 if (!fp->tx_tso_min_pkt_len) { 3410 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3411 fp->tx_tso_max_pkt_len = m_head->m_pkthdr.len; 3412 } else { 3413 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3414 fp->tx_tso_min_pkt_len = 3415 m_head->m_pkthdr.len; 3416 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3417 fp->tx_tso_max_pkt_len = 3418 m_head->m_pkthdr.len; 3419 } 3420 } 3421 } 3422 3423 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3424 offset = qlnx_tcp_offset(ha, m_head); 3425 3426 if ((ret == EFBIG) || 3427 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3428 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3429 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3430 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3431 3432 struct mbuf *m; 3433 3434 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3435 3436 fp->tx_defrag++; 3437 3438 m = m_defrag(m_head, M_NOWAIT); 3439 if (m == NULL) { 3440 fp->err_tx_defrag++; 3441 fp->tx_pkts_freed++; 3442 m_freem(m_head); 3443 *m_headp = NULL; 3444 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3445 return (ENOBUFS); 3446 } 3447 3448 m_head = m; 3449 *m_headp = m_head; 3450 3451 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3452 segs, &nsegs, BUS_DMA_NOWAIT))) { 3453 3454 fp->err_tx_defrag_dmamap_load++; 3455 3456 QL_DPRINT1(ha, 3457 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3458 ret, m_head->m_pkthdr.len); 3459 3460 fp->tx_pkts_freed++; 3461 m_freem(m_head); 3462 *m_headp = NULL; 3463 3464 return (ret); 3465 } 3466 3467 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3468 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3469 3470 fp->err_tx_non_tso_max_seg++; 3471 3472 QL_DPRINT1(ha, 3473 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3474 ret, nsegs, m_head->m_pkthdr.len); 3475 3476 fp->tx_pkts_freed++; 3477 m_freem(m_head); 3478 *m_headp = NULL; 3479 3480 return (ret); 3481 } 3482 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3483 offset = qlnx_tcp_offset(ha, m_head); 3484 3485 } else if (ret) { 3486 3487 fp->err_tx_dmamap_load++; 3488 3489 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3490 ret, m_head->m_pkthdr.len); 3491 fp->tx_pkts_freed++; 3492 m_freem(m_head); 3493 *m_headp = NULL; 3494 return (ret); 3495 } 3496 3497 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3498 3499 if (ha->dbg_trace_tso_pkt_len) { 3500 if (nsegs < QLNX_FP_MAX_SEGS) 3501 fp->tx_pkts[(nsegs - 1)]++; 3502 else 3503 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3504 } 3505 3506 #ifdef QLNX_TRACE_PERF_DATA 3507 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3508 if(m_head->m_pkthdr.len <= 2048) 3509 fp->tx_pkts_hist[0]++; 3510 else if((m_head->m_pkthdr.len > 2048) && 3511 (m_head->m_pkthdr.len <= 4096)) 3512 fp->tx_pkts_hist[1]++; 3513 else if((m_head->m_pkthdr.len > 4096) && 3514 (m_head->m_pkthdr.len <= 8192)) 3515 fp->tx_pkts_hist[2]++; 3516 else if((m_head->m_pkthdr.len > 8192) && 3517 (m_head->m_pkthdr.len <= 12288 )) 3518 fp->tx_pkts_hist[3]++; 3519 else if((m_head->m_pkthdr.len > 12288) && 3520 (m_head->m_pkthdr.len <= 16384))
3521 fp->tx_pkts_hist[4]++; 3522 else if((m_head->m_pkthdr.len > 16384) && 3523 (m_head->m_pkthdr.len <= 20480)) 3524 fp->tx_pkts_hist[5]++; 3525 else if((m_head->m_pkthdr.len > 20480) && 3526 (m_head->m_pkthdr.len <= 24576)) 3527 fp->tx_pkts_hist[6]++; 3528 else if((m_head->m_pkthdr.len > 24576) && 3529 (m_head->m_pkthdr.len <= 28672)) 3530 fp->tx_pkts_hist[7]++; 3531 else if((m_head->m_pkthdr.len > 28672) && 3532 (m_head->m_pkthdr.len <= 32768)) 3533 fp->tx_pkts_hist[8]++; 3534 else if((m_head->m_pkthdr.len > 32768) && 3535 (m_head->m_pkthdr.len <= 36864)) 3536 fp->tx_pkts_hist[9]++; 3537 else if((m_head->m_pkthdr.len > 36864) && 3538 (m_head->m_pkthdr.len <= 40960)) 3539 fp->tx_pkts_hist[10]++; 3540 else if((m_head->m_pkthdr.len > 40960) && 3541 (m_head->m_pkthdr.len <= 45056)) 3542 fp->tx_pkts_hist[11]++; 3543 else if((m_head->m_pkthdr.len > 45056) && 3544 (m_head->m_pkthdr.len <= 49152)) 3545 fp->tx_pkts_hist[12]++; 3546 else if((m_head->m_pkthdr.len > 49152) && 3547 (m_head->m_pkthdr.len <= 53248)) 3548 fp->tx_pkts_hist[13]++; 3549 else if((m_head->m_pkthdr.len > 53248) && 3550 (m_head->m_pkthdr.len <= 57344)) 3551 fp->tx_pkts_hist[14]++; 3552 else if((m_head->m_pkthdr.len > 53248) && 3553 (m_head->m_pkthdr.len <= 57344)) 3554 fp->tx_pkts_hist[15]++; 3555 else if((m_head->m_pkthdr.len > 57344) && 3556 (m_head->m_pkthdr.len <= 61440)) 3557 fp->tx_pkts_hist[16]++; 3558 else 3559 fp->tx_pkts_hist[17]++; 3560 } 3561 3562 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3563 3564 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3565 bd_used = TX_RING_SIZE - elem_left; 3566 3567 if(bd_used <= 100) 3568 fp->tx_pkts_q[0]++; 3569 else if((bd_used > 100) && (bd_used <= 500)) 3570 fp->tx_pkts_q[1]++; 3571 else if((bd_used > 500) && (bd_used <= 1000)) 3572 fp->tx_pkts_q[2]++; 3573 else if((bd_used > 1000) && (bd_used <= 2000)) 3574 fp->tx_pkts_q[3]++; 3575 else if((bd_used > 3000) && (bd_used <= 4000)) 3576 fp->tx_pkts_q[4]++; 3577 else if((bd_used > 4000) && (bd_used <= 5000)) 3578 fp->tx_pkts_q[5]++; 3579 else if((bd_used > 6000) && (bd_used <= 7000)) 3580 fp->tx_pkts_q[6]++; 3581 else if((bd_used > 7000) && (bd_used <= 8000)) 3582 fp->tx_pkts_q[7]++; 3583 else if((bd_used > 8000) && (bd_used <= 9000)) 3584 fp->tx_pkts_q[8]++; 3585 else if((bd_used > 9000) && (bd_used <= 10000)) 3586 fp->tx_pkts_q[9]++; 3587 else if((bd_used > 10000) && (bd_used <= 11000)) 3588 fp->tx_pkts_q[10]++; 3589 else if((bd_used > 11000) && (bd_used <= 12000)) 3590 fp->tx_pkts_q[11]++; 3591 else if((bd_used > 12000) && (bd_used <= 13000)) 3592 fp->tx_pkts_q[12]++; 3593 else if((bd_used > 13000) && (bd_used <= 14000)) 3594 fp->tx_pkts_q[13]++; 3595 else if((bd_used > 14000) && (bd_used <= 15000)) 3596 fp->tx_pkts_q[14]++; 3597 else if((bd_used > 15000) && (bd_used <= 16000)) 3598 fp->tx_pkts_q[15]++; 3599 else 3600 fp->tx_pkts_q[16]++; 3601 } 3602 3603 #endif /* end of QLNX_TRACE_PERF_DATA */ 3604 3605 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3606 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3607 3608 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs" 3609 " in chain[%d] trying to free packets\n", 3610 nsegs, elem_left, fp->rss_id); 3611 3612 fp->tx_nsegs_gt_elem_left++; 3613 3614 (void)qlnx_tx_int(ha, fp, txq); 3615 3616 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3617 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3618 3619 QL_DPRINT1(ha, 3620 "(%d, 0x%x) insufficient BDs in chain[%d]\n", 3621 nsegs, elem_left, fp->rss_id); 3622 3623 fp->err_tx_nsegs_gt_elem_left++; 3624 fp->tx_ring_full = 1; 3625 if
(ha->storm_stats_enable) 3626 ha->storm_stats_gather = 1; 3627 return (ENOBUFS); 3628 } 3629 } 3630 3631 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3632 3633 txq->sw_tx_ring[idx].mp = m_head; 3634 3635 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3636 3637 memset(first_bd, 0, sizeof(*first_bd)); 3638 3639 first_bd->data.bd_flags.bitfields = 3640 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3641 3642 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3643 3644 nbd++; 3645 3646 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3647 first_bd->data.bd_flags.bitfields |= 3648 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3649 } 3650 3651 if (m_head->m_pkthdr.csum_flags & 3652 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3653 first_bd->data.bd_flags.bitfields |= 3654 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3655 } 3656 3657 if (m_head->m_flags & M_VLANTAG) { 3658 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3659 first_bd->data.bd_flags.bitfields |= 3660 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3661 } 3662 3663 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3664 3665 first_bd->data.bd_flags.bitfields |= 3666 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3667 first_bd->data.bd_flags.bitfields |= 3668 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3669 3670 nbds_in_hdr = 1; 3671 3672 if (offset == segs->ds_len) { 3673 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3674 segs++; 3675 seg_idx++; 3676 3677 second_bd = (struct eth_tx_2nd_bd *) 3678 ecore_chain_produce(&txq->tx_pbl); 3679 memset(second_bd, 0, sizeof(*second_bd)); 3680 nbd++; 3681 3682 if (seg_idx < nsegs) { 3683 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3684 (segs->ds_addr), (segs->ds_len)); 3685 segs++; 3686 seg_idx++; 3687 } 3688 3689 third_bd = (struct eth_tx_3rd_bd *) 3690 ecore_chain_produce(&txq->tx_pbl); 3691 memset(third_bd, 0, sizeof(*third_bd)); 3692 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3693 third_bd->data.bitfields |= 3694 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3695 nbd++; 3696 3697 if (seg_idx < nsegs) { 3698 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3699 (segs->ds_addr), (segs->ds_len)); 3700 segs++; 3701 seg_idx++; 3702 } 3703 3704 for (; seg_idx < nsegs; seg_idx++) { 3705 tx_data_bd = (struct eth_tx_bd *) 3706 ecore_chain_produce(&txq->tx_pbl); 3707 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3708 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3709 segs->ds_addr,\ 3710 segs->ds_len); 3711 segs++; 3712 nbd++; 3713 } 3714 3715 } else if (offset < segs->ds_len) { 3716 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3717 3718 second_bd = (struct eth_tx_2nd_bd *) 3719 ecore_chain_produce(&txq->tx_pbl); 3720 memset(second_bd, 0, sizeof(*second_bd)); 3721 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3722 (segs->ds_addr + offset),\ 3723 (segs->ds_len - offset)); 3724 nbd++; 3725 segs++; 3726 3727 third_bd = (struct eth_tx_3rd_bd *) 3728 ecore_chain_produce(&txq->tx_pbl); 3729 memset(third_bd, 0, sizeof(*third_bd)); 3730 3731 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3732 segs->ds_addr,\ 3733 segs->ds_len); 3734 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3735 third_bd->data.bitfields |= 3736 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3737 segs++; 3738 nbd++; 3739 3740 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3741 tx_data_bd = (struct eth_tx_bd *) 3742 ecore_chain_produce(&txq->tx_pbl); 3743 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3744 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3745 segs->ds_addr,\ 3746 segs->ds_len); 3747 segs++; 3748 nbd++; 3749 } 3750 3751 } else 
{ 3752 offset = offset - segs->ds_len; 3753 segs++; 3754 3755 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3756 3757 if (offset) 3758 nbds_in_hdr++; 3759 3760 tx_data_bd = (struct eth_tx_bd *) 3761 ecore_chain_produce(&txq->tx_pbl); 3762 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3763 3764 if (second_bd == NULL) { 3765 second_bd = (struct eth_tx_2nd_bd *) 3766 tx_data_bd; 3767 } else if (third_bd == NULL) { 3768 third_bd = (struct eth_tx_3rd_bd *) 3769 tx_data_bd; 3770 } 3771 3772 if (offset && (offset < segs->ds_len)) { 3773 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3774 segs->ds_addr, offset); 3775 3776 tx_data_bd = (struct eth_tx_bd *) 3777 ecore_chain_produce(&txq->tx_pbl); 3778 3779 memset(tx_data_bd, 0, 3780 sizeof(*tx_data_bd)); 3781 3782 if (second_bd == NULL) { 3783 second_bd = 3784 (struct eth_tx_2nd_bd *)tx_data_bd; 3785 } else if (third_bd == NULL) { 3786 third_bd = 3787 (struct eth_tx_3rd_bd *)tx_data_bd; 3788 } 3789 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3790 (segs->ds_addr + offset), \ 3791 (segs->ds_len - offset)); 3792 nbd++; 3793 offset = 0; 3794 } else { 3795 if (offset) 3796 offset = offset - segs->ds_len; 3797 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3798 segs->ds_addr, segs->ds_len); 3799 } 3800 segs++; 3801 nbd++; 3802 } 3803 3804 if (third_bd == NULL) { 3805 third_bd = (struct eth_tx_3rd_bd *) 3806 ecore_chain_produce(&txq->tx_pbl); 3807 memset(third_bd, 0, sizeof(*third_bd)); 3808 } 3809 3810 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3811 third_bd->data.bitfields |= 3812 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3813 } 3814 fp->tx_tso_pkts++; 3815 } else { 3816 segs++; 3817 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3818 tx_data_bd = (struct eth_tx_bd *) 3819 ecore_chain_produce(&txq->tx_pbl); 3820 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3821 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3822 segs->ds_len); 3823 segs++; 3824 nbd++; 3825 } 3826 first_bd->data.bitfields = 3827 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3828 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3829 first_bd->data.bitfields = 3830 htole16(first_bd->data.bitfields); 3831 fp->tx_non_tso_pkts++; 3832 } 3833 3834 3835 first_bd->data.nbds = nbd; 3836 3837 if (ha->dbg_trace_tso_pkt_len) { 3838 if (fp->tx_tso_max_nsegs < nsegs) 3839 fp->tx_tso_max_nsegs = nsegs; 3840 3841 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3842 fp->tx_tso_min_nsegs = nsegs; 3843 } 3844 3845 txq->sw_tx_ring[idx].nsegs = nsegs; 3846 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3847 3848 txq->tx_db.data.bd_prod = 3849 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3850 3851 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3852 3853 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3854 return (0); 3855 } 3856 3857 static void 3858 qlnx_stop(qlnx_host_t *ha) 3859 { 3860 struct ifnet *ifp = ha->ifp; 3861 device_t dev; 3862 int i; 3863 3864 dev = ha->pci_dev; 3865 3866 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3867 3868 /* 3869 * We simply lock and unlock each fp->tx_mtx to 3870 * propagate the if_drv_flags 3871 * state to each tx thread 3872 */ 3873 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3874 3875 if (ha->state == QLNX_STATE_OPEN) { 3876 for (i = 0; i < ha->num_rss; i++) { 3877 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3878 3879 mtx_lock(&fp->tx_mtx); 3880 mtx_unlock(&fp->tx_mtx); 3881 3882 if (fp->fp_taskqueue != NULL) 3883 taskqueue_enqueue(fp->fp_taskqueue, 3884 &fp->fp_task); 3885 } 3886 } 3887 #ifdef QLNX_ENABLE_IWARP 3888 if 
(qlnx_vf_device(ha) != 0) { 3889 qlnx_rdma_dev_close(ha); 3890 } 3891 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3892 3893 qlnx_unload(ha); 3894 3895 return; 3896 } 3897 3898 static int 3899 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3900 { 3901 return(TX_RING_SIZE - 1); 3902 } 3903 3904 uint8_t * 3905 qlnx_get_mac_addr(qlnx_host_t *ha) 3906 { 3907 struct ecore_hwfn *p_hwfn; 3908 unsigned char mac[ETHER_ADDR_LEN]; 3909 uint8_t p_is_forced; 3910 3911 p_hwfn = &ha->cdev.hwfns[0]; 3912 3913 if (qlnx_vf_device(ha) != 0) 3914 return (p_hwfn->hw_info.hw_mac_addr); 3915 3916 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3917 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3918 true) { 3919 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3920 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3921 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3922 memcpy(ha->primary_mac, mac, ETH_ALEN); 3923 } 3924 3925 return (ha->primary_mac); 3926 } 3927 3928 static uint32_t 3929 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3930 { 3931 uint32_t ifm_type = 0; 3932 3933 switch (if_link->media_type) { 3934 3935 case MEDIA_MODULE_FIBER: 3936 case MEDIA_UNSPECIFIED: 3937 if (if_link->speed == (100 * 1000)) 3938 ifm_type = QLNX_IFM_100G_SR4; 3939 else if (if_link->speed == (40 * 1000)) 3940 ifm_type = IFM_40G_SR4; 3941 else if (if_link->speed == (25 * 1000)) 3942 ifm_type = QLNX_IFM_25G_SR; 3943 else if (if_link->speed == (10 * 1000)) 3944 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3945 else if (if_link->speed == (1 * 1000)) 3946 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3947 3948 break; 3949 3950 case MEDIA_DA_TWINAX: 3951 if (if_link->speed == (100 * 1000)) 3952 ifm_type = QLNX_IFM_100G_CR4; 3953 else if (if_link->speed == (40 * 1000)) 3954 ifm_type = IFM_40G_CR4; 3955 else if (if_link->speed == (25 * 1000)) 3956 ifm_type = QLNX_IFM_25G_CR; 3957 else if (if_link->speed == (10 * 1000)) 3958 ifm_type = IFM_10G_TWINAX; 3959 3960 break; 3961 3962 default : 3963 ifm_type = IFM_UNKNOWN; 3964 break; 3965 } 3966 return (ifm_type); 3967 } 3968 3969 3970 3971 /***************************************************************************** 3972 * Interrupt Service Functions 3973 *****************************************************************************/ 3974 3975 static int 3976 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3977 struct mbuf *mp_head, uint16_t len) 3978 { 3979 struct mbuf *mp, *mpf, *mpl; 3980 struct sw_rx_data *sw_rx_data; 3981 struct qlnx_rx_queue *rxq; 3982 uint16_t len_in_buffer; 3983 3984 rxq = fp->rxq; 3985 mpf = mpl = mp = NULL; 3986 3987 while (len) { 3988 3989 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3990 3991 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3992 mp = sw_rx_data->data; 3993 3994 if (mp == NULL) { 3995 QL_DPRINT1(ha, "mp = NULL\n"); 3996 fp->err_rx_mp_null++; 3997 rxq->sw_rx_cons = 3998 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3999 4000 if (mpf != NULL) 4001 m_freem(mpf); 4002 4003 return (-1); 4004 } 4005 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4006 BUS_DMASYNC_POSTREAD); 4007 4008 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4009 4010 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4011 " incoming packet and reusing its buffer\n"); 4012 4013 qlnx_reuse_rx_data(rxq); 4014 fp->err_rx_alloc_errors++; 4015 4016 if (mpf != NULL) 4017 m_freem(mpf); 4018 4019 return (-1); 4020 } 4021 ecore_chain_consume(&rxq->rx_bd_ring); 4022 4023 if (len > rxq->rx_buf_size) 4024 len_in_buffer = 
rxq->rx_buf_size; 4025 else 4026 len_in_buffer = len; 4027 4028 len = len - len_in_buffer; 4029 4030 mp->m_flags &= ~M_PKTHDR; 4031 mp->m_next = NULL; 4032 mp->m_len = len_in_buffer; 4033 4034 if (mpf == NULL) 4035 mpf = mpl = mp; 4036 else { 4037 mpl->m_next = mp; 4038 mpl = mp; 4039 } 4040 } 4041 4042 if (mpf != NULL) 4043 mp_head->m_next = mpf; 4044 4045 return (0); 4046 } 4047 4048 static void 4049 qlnx_tpa_start(qlnx_host_t *ha, 4050 struct qlnx_fastpath *fp, 4051 struct qlnx_rx_queue *rxq, 4052 struct eth_fast_path_rx_tpa_start_cqe *cqe) 4053 { 4054 uint32_t agg_index; 4055 struct ifnet *ifp = ha->ifp; 4056 struct mbuf *mp; 4057 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4058 struct sw_rx_data *sw_rx_data; 4059 dma_addr_t addr; 4060 bus_dmamap_t map; 4061 struct eth_rx_bd *rx_bd; 4062 int i; 4063 device_t dev; 4064 #if __FreeBSD_version >= 1100000 4065 uint8_t hash_type; 4066 #endif /* #if __FreeBSD_version >= 1100000 */ 4067 4068 dev = ha->pci_dev; 4069 agg_index = cqe->tpa_agg_index; 4070 4071 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 4072 \t type = 0x%x\n \ 4073 \t bitfields = 0x%x\n \ 4074 \t seg_len = 0x%x\n \ 4075 \t pars_flags = 0x%x\n \ 4076 \t vlan_tag = 0x%x\n \ 4077 \t rss_hash = 0x%x\n \ 4078 \t len_on_first_bd = 0x%x\n \ 4079 \t placement_offset = 0x%x\n \ 4080 \t tpa_agg_index = 0x%x\n \ 4081 \t header_len = 0x%x\n \ 4082 \t ext_bd_len_list[0] = 0x%x\n \ 4083 \t ext_bd_len_list[1] = 0x%x\n \ 4084 \t ext_bd_len_list[2] = 0x%x\n \ 4085 \t ext_bd_len_list[3] = 0x%x\n \ 4086 \t ext_bd_len_list[4] = 0x%x\n", 4087 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 4088 cqe->pars_flags.flags, cqe->vlan_tag, 4089 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 4090 cqe->tpa_agg_index, cqe->header_len, 4091 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 4092 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 4093 cqe->ext_bd_len_list[4]); 4094 4095 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4096 fp->err_rx_tpa_invalid_agg_num++; 4097 return; 4098 } 4099 4100 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4101 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 4102 mp = sw_rx_data->data; 4103 4104 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 4105 4106 if (mp == NULL) { 4107 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 4108 fp->err_rx_mp_null++; 4109 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4110 4111 return; 4112 } 4113 4114 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 4115 4116 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 4117 " flags = %x, dropping incoming packet\n", fp->rss_id, 4118 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 4119 4120 fp->err_rx_hw_errors++; 4121 4122 qlnx_reuse_rx_data(rxq); 4123 4124 QLNX_INC_IERRORS(ifp); 4125 4126 return; 4127 } 4128 4129 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4130 4131 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4132 " dropping incoming packet and reusing its buffer\n", 4133 fp->rss_id); 4134 4135 fp->err_rx_alloc_errors++; 4136 QLNX_INC_IQDROPS(ifp); 4137 4138 /* 4139 * Load the tpa mbuf into the rx ring and save the 4140 * posted mbuf 4141 */ 4142 4143 map = sw_rx_data->map; 4144 addr = sw_rx_data->dma_addr; 4145 4146 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4147 4148 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4149 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4150 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4151 4152 rxq->tpa_info[agg_index].rx_buf.data = mp; 4153 
rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4154 rxq->tpa_info[agg_index].rx_buf.map = map; 4155 4156 rx_bd = (struct eth_rx_bd *) 4157 ecore_chain_produce(&rxq->rx_bd_ring); 4158 4159 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4160 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4161 4162 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4163 BUS_DMASYNC_PREREAD); 4164 4165 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4166 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4167 4168 ecore_chain_consume(&rxq->rx_bd_ring); 4169 4170 /* Now reuse any buffers posted in ext_bd_len_list */ 4171 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4172 4173 if (cqe->ext_bd_len_list[i] == 0) 4174 break; 4175 4176 qlnx_reuse_rx_data(rxq); 4177 } 4178 4179 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4180 return; 4181 } 4182 4183 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4184 4185 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4186 " dropping incoming packet and reusing its buffer\n", 4187 fp->rss_id); 4188 4189 QLNX_INC_IQDROPS(ifp); 4190 4191 /* if we already have mbuf head in aggregation free it */ 4192 if (rxq->tpa_info[agg_index].mpf) { 4193 m_freem(rxq->tpa_info[agg_index].mpf); 4194 rxq->tpa_info[agg_index].mpl = NULL; 4195 } 4196 rxq->tpa_info[agg_index].mpf = mp; 4197 rxq->tpa_info[agg_index].mpl = NULL; 4198 4199 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4200 ecore_chain_consume(&rxq->rx_bd_ring); 4201 4202 /* Now reuse any buffers posted in ext_bd_len_list */ 4203 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4204 4205 if (cqe->ext_bd_len_list[i] == 0) 4206 break; 4207 4208 qlnx_reuse_rx_data(rxq); 4209 } 4210 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4211 4212 return; 4213 } 4214 4215 /* 4216 * first process the ext_bd_len_list 4217 * if this fails then we simply drop the packet 4218 */ 4219 ecore_chain_consume(&rxq->rx_bd_ring); 4220 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4221 4222 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4223 4224 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4225 4226 if (cqe->ext_bd_len_list[i] == 0) 4227 break; 4228 4229 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4230 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4231 BUS_DMASYNC_POSTREAD); 4232 4233 mpc = sw_rx_data->data; 4234 4235 if (mpc == NULL) { 4236 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4237 fp->err_rx_mp_null++; 4238 if (mpf != NULL) 4239 m_freem(mpf); 4240 mpf = mpl = NULL; 4241 rxq->tpa_info[agg_index].agg_state = 4242 QLNX_AGG_STATE_ERROR; 4243 ecore_chain_consume(&rxq->rx_bd_ring); 4244 rxq->sw_rx_cons = 4245 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4246 continue; 4247 } 4248 4249 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4250 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4251 " dropping incoming packet and reusing its" 4252 " buffer\n", fp->rss_id); 4253 4254 qlnx_reuse_rx_data(rxq); 4255 4256 if (mpf != NULL) 4257 m_freem(mpf); 4258 mpf = mpl = NULL; 4259 4260 rxq->tpa_info[agg_index].agg_state = 4261 QLNX_AGG_STATE_ERROR; 4262 4263 ecore_chain_consume(&rxq->rx_bd_ring); 4264 rxq->sw_rx_cons = 4265 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4266 4267 continue; 4268 } 4269 4270 mpc->m_flags &= ~M_PKTHDR; 4271 mpc->m_next = NULL; 4272 mpc->m_len = cqe->ext_bd_len_list[i]; 4273 4274 4275 if (mpf == NULL) { 4276 mpf = mpl = mpc; 4277 } else { 4278 mpl->m_len = ha->rx_buf_size; 4279 mpl->m_next = mpc; 4280 
mpl = mpc; 4281 } 4282 4283 ecore_chain_consume(&rxq->rx_bd_ring); 4284 rxq->sw_rx_cons = 4285 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4286 } 4287 4288 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4289 4290 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4291 " incoming packet and reusing its buffer\n", 4292 fp->rss_id); 4293 4294 QLNX_INC_IQDROPS(ifp); 4295 4296 rxq->tpa_info[agg_index].mpf = mp; 4297 rxq->tpa_info[agg_index].mpl = NULL; 4298 4299 return; 4300 } 4301 4302 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 4303 4304 if (mpf != NULL) { 4305 mp->m_len = ha->rx_buf_size; 4306 mp->m_next = mpf; 4307 rxq->tpa_info[agg_index].mpf = mp; 4308 rxq->tpa_info[agg_index].mpl = mpl; 4309 } else { 4310 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4311 rxq->tpa_info[agg_index].mpf = mp; 4312 rxq->tpa_info[agg_index].mpl = mp; 4313 mp->m_next = NULL; 4314 } 4315 4316 mp->m_flags |= M_PKTHDR; 4317 4318 /* assign packet to this interface */ 4319 mp->m_pkthdr.rcvif = ifp; 4320 4321 /* assume no hardware checksum has completed */ 4322 mp->m_pkthdr.csum_flags = 0; 4323 4324 //mp->m_pkthdr.flowid = fp->rss_id; 4325 mp->m_pkthdr.flowid = cqe->rss_hash; 4326 4327 #if __FreeBSD_version >= 1100000 4328 4329 hash_type = cqe->bitfields & 4330 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4331 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4332 4333 switch (hash_type) { 4334 4335 case RSS_HASH_TYPE_IPV4: 4336 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4337 break; 4338 4339 case RSS_HASH_TYPE_TCP_IPV4: 4340 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4341 break; 4342 4343 case RSS_HASH_TYPE_IPV6: 4344 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4345 break; 4346 4347 case RSS_HASH_TYPE_TCP_IPV6: 4348 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4349 break; 4350 4351 default: 4352 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4353 break; 4354 } 4355 4356 #else 4357 mp->m_flags |= M_FLOWID; 4358 #endif 4359 4360 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4361 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4362 4363 mp->m_pkthdr.csum_data = 0xFFFF; 4364 4365 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4366 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4367 mp->m_flags |= M_VLANTAG; 4368 } 4369 4370 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4371 4372 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4373 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4374 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4375 4376 return; 4377 } 4378 4379 static void 4380 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4381 struct qlnx_rx_queue *rxq, 4382 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4383 { 4384 struct sw_rx_data *sw_rx_data; 4385 int i; 4386 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4387 struct mbuf *mp; 4388 uint32_t agg_index; 4389 device_t dev; 4390 4391 dev = ha->pci_dev; 4392 4393 QL_DPRINT7(ha, "[%d]: enter\n \ 4394 \t type = 0x%x\n \ 4395 \t tpa_agg_index = 0x%x\n \ 4396 \t len_list[0] = 0x%x\n \ 4397 \t len_list[1] = 0x%x\n \ 4398 \t len_list[2] = 0x%x\n \ 4399 \t len_list[3] = 0x%x\n \ 4400 \t len_list[4] = 0x%x\n \ 4401 \t len_list[5] = 0x%x\n", 4402 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4403 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4404 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4405 4406 agg_index = cqe->tpa_agg_index; 4407 4408 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4409 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4410
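/* aggregation index reported by the hardware is out of range; count the error and drop this continuation CQE without touching the rings */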
fp->err_rx_tpa_invalid_agg_num++; 4411 return; 4412 } 4413 4414 4415 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4416 4417 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4418 4419 if (cqe->len_list[i] == 0) 4420 break; 4421 4422 if (rxq->tpa_info[agg_index].agg_state != 4423 QLNX_AGG_STATE_START) { 4424 qlnx_reuse_rx_data(rxq); 4425 continue; 4426 } 4427 4428 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4429 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4430 BUS_DMASYNC_POSTREAD); 4431 4432 mpc = sw_rx_data->data; 4433 4434 if (mpc == NULL) { 4435 4436 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4437 4438 fp->err_rx_mp_null++; 4439 if (mpf != NULL) 4440 m_freem(mpf); 4441 mpf = mpl = NULL; 4442 rxq->tpa_info[agg_index].agg_state = 4443 QLNX_AGG_STATE_ERROR; 4444 ecore_chain_consume(&rxq->rx_bd_ring); 4445 rxq->sw_rx_cons = 4446 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4447 continue; 4448 } 4449 4450 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4451 4452 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4453 " dropping incoming packet and reusing its" 4454 " buffer\n", fp->rss_id); 4455 4456 qlnx_reuse_rx_data(rxq); 4457 4458 if (mpf != NULL) 4459 m_freem(mpf); 4460 mpf = mpl = NULL; 4461 4462 rxq->tpa_info[agg_index].agg_state = 4463 QLNX_AGG_STATE_ERROR; 4464 4465 ecore_chain_consume(&rxq->rx_bd_ring); 4466 rxq->sw_rx_cons = 4467 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4468 4469 continue; 4470 } 4471 4472 mpc->m_flags &= ~M_PKTHDR; 4473 mpc->m_next = NULL; 4474 mpc->m_len = cqe->len_list[i]; 4475 4476 4477 if (mpf == NULL) { 4478 mpf = mpl = mpc; 4479 } else { 4480 mpl->m_len = ha->rx_buf_size; 4481 mpl->m_next = mpc; 4482 mpl = mpc; 4483 } 4484 4485 ecore_chain_consume(&rxq->rx_bd_ring); 4486 rxq->sw_rx_cons = 4487 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4488 } 4489 4490 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4491 fp->rss_id, mpf, mpl); 4492 4493 if (mpf != NULL) { 4494 mp = rxq->tpa_info[agg_index].mpl; 4495 mp->m_len = ha->rx_buf_size; 4496 mp->m_next = mpf; 4497 rxq->tpa_info[agg_index].mpl = mpl; 4498 } 4499 4500 return; 4501 } 4502 4503 static int 4504 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4505 struct qlnx_rx_queue *rxq, 4506 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4507 { 4508 struct sw_rx_data *sw_rx_data; 4509 int i; 4510 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4511 struct mbuf *mp; 4512 uint32_t agg_index; 4513 uint32_t len = 0; 4514 struct ifnet *ifp = ha->ifp; 4515 device_t dev; 4516 4517 dev = ha->pci_dev; 4518 4519 QL_DPRINT7(ha, "[%d]: enter\n \ 4520 \t type = 0x%x\n \ 4521 \t tpa_agg_index = 0x%x\n \ 4522 \t total_packet_len = 0x%x\n \ 4523 \t num_of_bds = 0x%x\n \ 4524 \t end_reason = 0x%x\n \ 4525 \t num_of_coalesced_segs = 0x%x\n \ 4526 \t ts_delta = 0x%x\n \ 4527 \t len_list[0] = 0x%x\n \ 4528 \t len_list[1] = 0x%x\n \ 4529 \t len_list[2] = 0x%x\n \ 4530 \t len_list[3] = 0x%x\n", 4531 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4532 cqe->total_packet_len, cqe->num_of_bds, 4533 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4534 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4535 cqe->len_list[3]); 4536 4537 agg_index = cqe->tpa_agg_index; 4538 4539 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4540 4541 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4542 4543 fp->err_rx_tpa_invalid_agg_num++; 4544 return (0); 4545 } 4546 4547 4548 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4549 4550 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4551 4552 if (cqe->len_list[i] == 0) 4553 break; 4554 4555 
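/* only take buffers off the ring while the aggregation is still in the START state; otherwise recycle the posted buffer back to the producer */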
if (rxq->tpa_info[agg_index].agg_state != 4556 QLNX_AGG_STATE_START) { 4557 4558 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4559 4560 qlnx_reuse_rx_data(rxq); 4561 continue; 4562 } 4563 4564 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4565 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4566 BUS_DMASYNC_POSTREAD); 4567 4568 mpc = sw_rx_data->data; 4569 4570 if (mpc == NULL) { 4571 4572 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4573 4574 fp->err_rx_mp_null++; 4575 if (mpf != NULL) 4576 m_freem(mpf); 4577 mpf = mpl = NULL; 4578 rxq->tpa_info[agg_index].agg_state = 4579 QLNX_AGG_STATE_ERROR; 4580 ecore_chain_consume(&rxq->rx_bd_ring); 4581 rxq->sw_rx_cons = 4582 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4583 continue; 4584 } 4585 4586 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4587 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4588 " dropping incoming packet and reusing its" 4589 " buffer\n", fp->rss_id); 4590 4591 qlnx_reuse_rx_data(rxq); 4592 4593 if (mpf != NULL) 4594 m_freem(mpf); 4595 mpf = mpl = NULL; 4596 4597 rxq->tpa_info[agg_index].agg_state = 4598 QLNX_AGG_STATE_ERROR; 4599 4600 ecore_chain_consume(&rxq->rx_bd_ring); 4601 rxq->sw_rx_cons = 4602 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4603 4604 continue; 4605 } 4606 4607 mpc->m_flags &= ~M_PKTHDR; 4608 mpc->m_next = NULL; 4609 mpc->m_len = cqe->len_list[i]; 4610 4611 4612 if (mpf == NULL) { 4613 mpf = mpl = mpc; 4614 } else { 4615 mpl->m_len = ha->rx_buf_size; 4616 mpl->m_next = mpc; 4617 mpl = mpc; 4618 } 4619 4620 ecore_chain_consume(&rxq->rx_bd_ring); 4621 rxq->sw_rx_cons = 4622 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4623 } 4624 4625 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4626 4627 if (mpf != NULL) { 4628 4629 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4630 4631 mp = rxq->tpa_info[agg_index].mpl; 4632 mp->m_len = ha->rx_buf_size; 4633 mp->m_next = mpf; 4634 } 4635 4636 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4637 4638 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4639 4640 if (rxq->tpa_info[agg_index].mpf != NULL) 4641 m_freem(rxq->tpa_info[agg_index].mpf); 4642 rxq->tpa_info[agg_index].mpf = NULL; 4643 rxq->tpa_info[agg_index].mpl = NULL; 4644 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4645 return (0); 4646 } 4647 4648 mp = rxq->tpa_info[agg_index].mpf; 4649 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4650 mp->m_pkthdr.len = cqe->total_packet_len; 4651 4652 if (mp->m_next == NULL) 4653 mp->m_len = mp->m_pkthdr.len; 4654 else { 4655 /* compute the total packet length */ 4656 mpf = mp; 4657 while (mpf != NULL) { 4658 len += mpf->m_len; 4659 mpf = mpf->m_next; 4660 } 4661 4662 if (cqe->total_packet_len > len) { 4663 mpl = rxq->tpa_info[agg_index].mpl; 4664 mpl->m_len += (cqe->total_packet_len - len); 4665 } 4666 } 4667 4668 QLNX_INC_IPACKETS(ifp); 4669 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4670 4671 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4672 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4673 fp->rss_id, mp->m_pkthdr.csum_data, 4674 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4675 4676 (*ifp->if_input)(ifp, mp); 4677 4678 rxq->tpa_info[agg_index].mpf = NULL; 4679 rxq->tpa_info[agg_index].mpl = NULL; 4680 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4681 4682 return (cqe->num_of_coalesced_segs); 4683 } 4684 4685 static int 4686 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4687 int lro_enable) 4688 { 4689 uint16_t hw_comp_cons, sw_comp_cons; 4690 int rx_pkt = 0; 4691 struct 
qlnx_rx_queue *rxq = fp->rxq; 4692 struct ifnet *ifp = ha->ifp; 4693 struct ecore_dev *cdev = &ha->cdev; 4694 struct ecore_hwfn *p_hwfn; 4695 4696 #ifdef QLNX_SOFT_LRO 4697 struct lro_ctrl *lro; 4698 4699 lro = &rxq->lro; 4700 #endif /* #ifdef QLNX_SOFT_LRO */ 4701 4702 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4703 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4704 4705 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4706 4707 /* Memory barrier to prevent the CPU from doing speculative reads of the 4708 * CQE/BD in the while-loop before reading hw_comp_cons. If the CQE were 4709 * read before FW has written it, and FW then wrote the CQE and SB before 4710 * the CPU read hw_comp_cons, the CPU would operate on a stale CQE. 4711 */ 4712 4713 /* Loop to complete all indicated BDs */ 4714 while (sw_comp_cons != hw_comp_cons) { 4715 union eth_rx_cqe *cqe; 4716 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4717 struct sw_rx_data *sw_rx_data; 4718 register struct mbuf *mp; 4719 enum eth_rx_cqe_type cqe_type; 4720 uint16_t len, pad, len_on_first_bd; 4721 uint8_t *data; 4722 #if __FreeBSD_version >= 1100000 4723 uint8_t hash_type; 4724 #endif /* #if __FreeBSD_version >= 1100000 */ 4725 4726 /* Get the CQE from the completion ring */ 4727 cqe = (union eth_rx_cqe *) 4728 ecore_chain_consume(&rxq->rx_comp_ring); 4729 cqe_type = cqe->fast_path_regular.type; 4730 4731 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4732 QL_DPRINT3(ha, "Got a slowpath CQE\n"); 4733 4734 ecore_eth_cqe_completion(p_hwfn, 4735 (struct eth_slow_path_rx_cqe *)cqe); 4736 goto next_cqe; 4737 } 4738 4739 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4740 4741 switch (cqe_type) { 4742 4743 case ETH_RX_CQE_TYPE_TPA_START: 4744 qlnx_tpa_start(ha, fp, rxq, 4745 &cqe->fast_path_tpa_start); 4746 fp->tpa_start++; 4747 break; 4748 4749 case ETH_RX_CQE_TYPE_TPA_CONT: 4750 qlnx_tpa_cont(ha, fp, rxq, 4751 &cqe->fast_path_tpa_cont); 4752 fp->tpa_cont++; 4753 break; 4754 4755 case ETH_RX_CQE_TYPE_TPA_END: 4756 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4757 &cqe->fast_path_tpa_end); 4758 fp->tpa_end++; 4759 break; 4760 4761 default: 4762 break; 4763 } 4764 4765 goto next_cqe; 4766 } 4767 4768 /* Get the data from the SW ring */ 4769 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4770 mp = sw_rx_data->data; 4771 4772 if (mp == NULL) { 4773 QL_DPRINT1(ha, "mp = NULL\n"); 4774 fp->err_rx_mp_null++; 4775 rxq->sw_rx_cons = 4776 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4777 goto next_cqe; 4778 } 4779 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4780 BUS_DMASYNC_POSTREAD); 4781 4782 /* non GRO */ 4783 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4784 len = le16toh(fp_cqe->pkt_len); 4785 pad = fp_cqe->placement_offset; 4786 #if 0 4787 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4788 " len %u, parsing flags = %d pad = %d\n", 4789 cqe_type, fp_cqe->bitfields, 4790 le16toh(fp_cqe->vlan_tag), 4791 len, le16toh(fp_cqe->pars_flags.flags), pad); 4792 #endif 4793 data = mtod(mp, uint8_t *); 4794 data = data + pad; 4795 4796 if (0) 4797 qlnx_dump_buf8(ha, __func__, data, len); 4798 4799 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4800 * is always with a fixed size. If allocation fails, we take the 4801 * consumed BD and return it to the ring in the PROD position. 4802 * The packet that was received on that BD will be dropped (and 4803 not passed to the upper stack).
4804 */ 4805 /* If this is an error packet then drop it */ 4806 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4807 CQE_FLAGS_ERR) { 4808 4809 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4810 " dropping incoming packet\n", sw_comp_cons, 4811 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4812 fp->err_rx_hw_errors++; 4813 4814 qlnx_reuse_rx_data(rxq); 4815 4816 QLNX_INC_IERRORS(ifp); 4817 4818 goto next_cqe; 4819 } 4820 4821 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4822 4823 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4824 " incoming packet and reusing its buffer\n"); 4825 qlnx_reuse_rx_data(rxq); 4826 4827 fp->err_rx_alloc_errors++; 4828 4829 QLNX_INC_IQDROPS(ifp); 4830 4831 goto next_cqe; 4832 } 4833 4834 ecore_chain_consume(&rxq->rx_bd_ring); 4835 4836 len_on_first_bd = fp_cqe->len_on_first_bd; 4837 m_adj(mp, pad); 4838 mp->m_pkthdr.len = len; 4839 4840 if ((len > 60) && (len > len_on_first_bd)) { 4841 4842 mp->m_len = len_on_first_bd; 4843 4844 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4845 (len - len_on_first_bd)) != 0) { 4846 4847 m_freem(mp); 4848 4849 QLNX_INC_IQDROPS(ifp); 4850 4851 goto next_cqe; 4852 } 4853 4854 } else if (len_on_first_bd < len) { 4855 fp->err_rx_jumbo_chain_pkts++; 4856 } else { 4857 mp->m_len = len; 4858 } 4859 4860 mp->m_flags |= M_PKTHDR; 4861 4862 /* assign packet to this interface */ 4863 mp->m_pkthdr.rcvif = ifp; 4864 4865 /* assume no hardware checksum has completed */ 4866 mp->m_pkthdr.csum_flags = 0; 4867 4868 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4869 4870 #if __FreeBSD_version >= 1100000 4871 4872 hash_type = fp_cqe->bitfields & 4873 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4874 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4875 4876 switch (hash_type) { 4877 4878 case RSS_HASH_TYPE_IPV4: 4879 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4880 break; 4881 4882 case RSS_HASH_TYPE_TCP_IPV4: 4883 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4884 break; 4885 4886 case RSS_HASH_TYPE_IPV6: 4887 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4888 break; 4889 4890 case RSS_HASH_TYPE_TCP_IPV6: 4891 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4892 break; 4893 4894 default: 4895 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4896 break; 4897 } 4898 4899 #else 4900 mp->m_flags |= M_FLOWID; 4901 #endif 4902 4903 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4904 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4905 } 4906 4907 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4908 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4909 } 4910 4911 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4912 mp->m_pkthdr.csum_data = 0xFFFF; 4913 mp->m_pkthdr.csum_flags |= 4914 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4915 } 4916 4917 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4918 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4919 mp->m_flags |= M_VLANTAG; 4920 } 4921 4922 QLNX_INC_IPACKETS(ifp); 4923 QLNX_INC_IBYTES(ifp, len); 4924 4925 #ifdef QLNX_SOFT_LRO 4926 4927 if (lro_enable) { 4928 4929 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4930 4931 tcp_lro_queue_mbuf(lro, mp); 4932 4933 #else 4934 4935 if (tcp_lro_rx(lro, mp, 0)) 4936 (*ifp->if_input)(ifp, mp); 4937 4938 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4939 4940 } else { 4941 (*ifp->if_input)(ifp, mp); 4942 } 4943 #else 4944 4945 (*ifp->if_input)(ifp, mp); 4946 4947 #endif /* #ifdef QLNX_SOFT_LRO */ 4948 4949 rx_pkt++; 4950 4951 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4952 4953 next_cqe: /* don't consume bd
rx buffer */ 4954 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4955 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4956 4957 /* CR TPA - revisit how to handle budget in TPA perhaps 4958 increase on "end" */ 4959 if (rx_pkt == budget) 4960 break; 4961 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4962 4963 /* Update producers */ 4964 qlnx_update_rx_prod(p_hwfn, rxq); 4965 4966 return rx_pkt; 4967 } 4968 4969 4970 /* 4971 * fast path interrupt 4972 */ 4973 4974 static void 4975 qlnx_fp_isr(void *arg) 4976 { 4977 qlnx_ivec_t *ivec = arg; 4978 qlnx_host_t *ha; 4979 struct qlnx_fastpath *fp = NULL; 4980 int idx; 4981 4982 ha = ivec->ha; 4983 4984 if (ha->state != QLNX_STATE_OPEN) { 4985 return; 4986 } 4987 4988 idx = ivec->rss_idx; 4989 4990 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4991 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4992 ha->err_illegal_intr++; 4993 return; 4994 } 4995 fp = &ha->fp_array[idx]; 4996 4997 if (fp == NULL) { 4998 ha->err_fp_null++; 4999 } else { 5000 int rx_int = 0, total_rx_count = 0; 5001 int lro_enable, tc; 5002 struct qlnx_tx_queue *txq; 5003 uint16_t elem_left; 5004 5005 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 5006 5007 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 5008 5009 do { 5010 for (tc = 0; tc < ha->num_tc; tc++) { 5011 5012 txq = fp->txq[tc]; 5013 5014 if((int)(elem_left = 5015 ecore_chain_get_elem_left(&txq->tx_pbl)) < 5016 QLNX_TX_ELEM_THRESH) { 5017 5018 if (mtx_trylock(&fp->tx_mtx)) { 5019 #ifdef QLNX_TRACE_PERF_DATA 5020 tx_compl = fp->tx_pkts_completed; 5021 #endif 5022 5023 qlnx_tx_int(ha, fp, fp->txq[tc]); 5024 #ifdef QLNX_TRACE_PERF_DATA 5025 fp->tx_pkts_compl_intr += 5026 (fp->tx_pkts_completed - tx_compl); 5027 if ((fp->tx_pkts_completed - tx_compl) <= 32) 5028 fp->tx_comInt[0]++; 5029 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 5030 ((fp->tx_pkts_completed - tx_compl) <= 64)) 5031 fp->tx_comInt[1]++; 5032 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 5033 ((fp->tx_pkts_completed - tx_compl) <= 128)) 5034 fp->tx_comInt[2]++; 5035 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 5036 fp->tx_comInt[3]++; 5037 #endif 5038 mtx_unlock(&fp->tx_mtx); 5039 } 5040 } 5041 } 5042 5043 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 5044 lro_enable); 5045 5046 if (rx_int) { 5047 fp->rx_pkts += rx_int; 5048 total_rx_count += rx_int; 5049 } 5050 5051 } while (rx_int); 5052 5053 #ifdef QLNX_SOFT_LRO 5054 { 5055 struct lro_ctrl *lro; 5056 5057 lro = &fp->rxq->lro; 5058 5059 if (lro_enable && total_rx_count) { 5060 5061 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 5062 5063 #ifdef QLNX_TRACE_LRO_CNT 5064 if (lro->lro_mbuf_count & ~1023) 5065 fp->lro_cnt_1024++; 5066 else if (lro->lro_mbuf_count & ~511) 5067 fp->lro_cnt_512++; 5068 else if (lro->lro_mbuf_count & ~255) 5069 fp->lro_cnt_256++; 5070 else if (lro->lro_mbuf_count & ~127) 5071 fp->lro_cnt_128++; 5072 else if (lro->lro_mbuf_count & ~63) 5073 fp->lro_cnt_64++; 5074 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 5075 5076 tcp_lro_flush_all(lro); 5077 5078 #else 5079 struct lro_entry *queued; 5080 5081 while ((!SLIST_EMPTY(&lro->lro_active))) { 5082 queued = SLIST_FIRST(&lro->lro_active); 5083 SLIST_REMOVE_HEAD(&lro->lro_active, \ 5084 next); 5085 tcp_lro_flush(lro, queued); 5086 } 5087 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 5088 } 5089 } 5090 #endif /* #ifdef QLNX_SOFT_LRO */ 5091 5092 ecore_sb_update_sb_idx(fp->sb_info); 5093 rmb(); 5094 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 5095 } 
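/* fastpath interrupts stay disabled for the duration of the loop above; the status block index is updated and IGU_INT_ENABLE is acked only after all pending Tx completions and Rx CQEs have been processed */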
5096 5097 return; 5098 } 5099 5100 5101 /* 5102 * slow path interrupt processing function 5103 * can be invoked in polled mode or in interrupt mode via taskqueue. 5104 */ 5105 void 5106 qlnx_sp_isr(void *arg) 5107 { 5108 struct ecore_hwfn *p_hwfn; 5109 qlnx_host_t *ha; 5110 5111 p_hwfn = arg; 5112 5113 ha = (qlnx_host_t *)p_hwfn->p_dev; 5114 5115 ha->sp_interrupts++; 5116 5117 QL_DPRINT2(ha, "enter\n"); 5118 5119 ecore_int_sp_dpc(p_hwfn); 5120 5121 QL_DPRINT2(ha, "exit\n"); 5122 5123 return; 5124 } 5125 5126 /***************************************************************************** 5127 * Support Functions for DMA'able Memory 5128 *****************************************************************************/ 5129 5130 static void 5131 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 5132 { 5133 *((bus_addr_t *)arg) = 0; 5134 5135 if (error) { 5136 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 5137 return; 5138 } 5139 5140 *((bus_addr_t *)arg) = segs[0].ds_addr; 5141 5142 return; 5143 } 5144 5145 static int 5146 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5147 { 5148 int ret = 0; 5149 device_t dev; 5150 bus_addr_t b_addr; 5151 5152 dev = ha->pci_dev; 5153 5154 ret = bus_dma_tag_create( 5155 ha->parent_tag,/* parent */ 5156 dma_buf->alignment, 5157 ((bus_size_t)(1ULL << 32)),/* boundary */ 5158 BUS_SPACE_MAXADDR, /* lowaddr */ 5159 BUS_SPACE_MAXADDR, /* highaddr */ 5160 NULL, NULL, /* filter, filterarg */ 5161 dma_buf->size, /* maxsize */ 5162 1, /* nsegments */ 5163 dma_buf->size, /* maxsegsize */ 5164 0, /* flags */ 5165 NULL, NULL, /* lockfunc, lockarg */ 5166 &dma_buf->dma_tag); 5167 5168 if (ret) { 5169 QL_DPRINT1(ha, "could not create dma tag\n"); 5170 goto qlnx_alloc_dmabuf_exit; 5171 } 5172 ret = bus_dmamem_alloc(dma_buf->dma_tag, 5173 (void **)&dma_buf->dma_b, 5174 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 5175 &dma_buf->dma_map); 5176 if (ret) { 5177 bus_dma_tag_destroy(dma_buf->dma_tag); 5178 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 5179 goto qlnx_alloc_dmabuf_exit; 5180 } 5181 5182 ret = bus_dmamap_load(dma_buf->dma_tag, 5183 dma_buf->dma_map, 5184 dma_buf->dma_b, 5185 dma_buf->size, 5186 qlnx_dmamap_callback, 5187 &b_addr, BUS_DMA_NOWAIT); 5188 5189 if (ret || !b_addr) { 5190 bus_dma_tag_destroy(dma_buf->dma_tag); 5191 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 5192 dma_buf->dma_map); 5193 ret = -1; 5194 goto qlnx_alloc_dmabuf_exit; 5195 } 5196 5197 dma_buf->dma_addr = b_addr; 5198 5199 qlnx_alloc_dmabuf_exit: 5200 5201 return ret; 5202 } 5203 5204 static void 5205 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5206 { 5207 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5208 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5209 bus_dma_tag_destroy(dma_buf->dma_tag); 5210 return; 5211 } 5212 5213 void * 5214 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5215 { 5216 qlnx_dma_t dma_buf; 5217 qlnx_dma_t *dma_p; 5218 qlnx_host_t *ha; 5219 device_t dev; 5220 5221 ha = (qlnx_host_t *)ecore_dev; 5222 dev = ha->pci_dev; 5223 5224 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5225 5226 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5227 5228 dma_buf.size = size + PAGE_SIZE; 5229 dma_buf.alignment = 8; 5230 5231 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5232 return (NULL); 5233 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5234 5235 *phys = dma_buf.dma_addr; 5236 5237 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + 
size); 5238 5239 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5240 5241 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5242 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5243 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5244 5245 return (dma_buf.dma_b); 5246 } 5247 5248 void 5249 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5250 uint32_t size) 5251 { 5252 qlnx_dma_t dma_buf, *dma_p; 5253 qlnx_host_t *ha; 5254 device_t dev; 5255 5256 ha = (qlnx_host_t *)ecore_dev; 5257 dev = ha->pci_dev; 5258 5259 if (v_addr == NULL) 5260 return; 5261 5262 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5263 5264 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5265 5266 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5267 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5268 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5269 5270 dma_buf = *dma_p; 5271 5272 if (!ha->qlnxr_debug) 5273 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5274 return; 5275 } 5276 5277 static int 5278 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5279 { 5280 int ret; 5281 device_t dev; 5282 5283 dev = ha->pci_dev; 5284 5285 /* 5286 * Allocate parent DMA Tag 5287 */ 5288 ret = bus_dma_tag_create( 5289 bus_get_dma_tag(dev), /* parent */ 5290 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5291 BUS_SPACE_MAXADDR, /* lowaddr */ 5292 BUS_SPACE_MAXADDR, /* highaddr */ 5293 NULL, NULL, /* filter, filterarg */ 5294 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5295 0, /* nsegments */ 5296 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5297 0, /* flags */ 5298 NULL, NULL, /* lockfunc, lockarg */ 5299 &ha->parent_tag); 5300 5301 if (ret) { 5302 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5303 return (-1); 5304 } 5305 5306 ha->flags.parent_tag = 1; 5307 5308 return (0); 5309 } 5310 5311 static void 5312 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5313 { 5314 if (ha->parent_tag != NULL) { 5315 bus_dma_tag_destroy(ha->parent_tag); 5316 ha->parent_tag = NULL; 5317 } 5318 return; 5319 } 5320 5321 static int 5322 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5323 { 5324 if (bus_dma_tag_create(NULL, /* parent */ 5325 1, 0, /* alignment, bounds */ 5326 BUS_SPACE_MAXADDR, /* lowaddr */ 5327 BUS_SPACE_MAXADDR, /* highaddr */ 5328 NULL, NULL, /* filter, filterarg */ 5329 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5330 QLNX_MAX_SEGMENTS, /* nsegments */ 5331 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5332 0, /* flags */ 5333 NULL, /* lockfunc */ 5334 NULL, /* lockfuncarg */ 5335 &ha->tx_tag)) { 5336 5337 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5338 return (-1); 5339 } 5340 5341 return (0); 5342 } 5343 5344 static void 5345 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5346 { 5347 if (ha->tx_tag != NULL) { 5348 bus_dma_tag_destroy(ha->tx_tag); 5349 ha->tx_tag = NULL; 5350 } 5351 return; 5352 } 5353 5354 static int 5355 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5356 { 5357 if (bus_dma_tag_create(NULL, /* parent */ 5358 1, 0, /* alignment, bounds */ 5359 BUS_SPACE_MAXADDR, /* lowaddr */ 5360 BUS_SPACE_MAXADDR, /* highaddr */ 5361 NULL, NULL, /* filter, filterarg */ 5362 MJUM9BYTES, /* maxsize */ 5363 1, /* nsegments */ 5364 MJUM9BYTES, /* maxsegsize */ 5365 0, /* flags */ 5366 NULL, /* lockfunc */ 5367 NULL, /* lockfuncarg */ 5368 &ha->rx_tag)) { 5369 5370 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5371 5372 return (-1); 5373 } 5374 return (0); 5375 } 5376 5377 static void 5378 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5379 { 5380 if (ha->rx_tag != NULL) { 5381 bus_dma_tag_destroy(ha->rx_tag); 5382 ha->rx_tag = NULL; 5383 } 5384 return; 5385 } 5386 5387 
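/* Note on the DMA tags above: qlnx_alloc_dmabuf() derives its per-buffer tags from the parent tag (4GB boundary), while tx_tag and rx_tag are created standalone; tx_tag allows up to QLNX_MAX_SEGMENTS scatter/gather segments (at most QLNX_MAX_TSO_FRAME_SIZE bytes) per transmit frame, and rx_tag maps each receive mbuf as a single contiguous segment of at most MJUM9BYTES */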
/********************************* 5388 * Exported functions 5389 *********************************/ 5390 uint32_t 5391 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5392 { 5393 uint32_t bar_size; 5394 5395 bar_id = bar_id * 2; 5396 5397 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5398 SYS_RES_MEMORY, 5399 PCIR_BAR(bar_id)); 5400 5401 return (bar_size); 5402 } 5403 5404 uint32_t 5405 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5406 { 5407 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5408 pci_reg, 1); 5409 return 0; 5410 } 5411 5412 uint32_t 5413 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5414 uint16_t *reg_value) 5415 { 5416 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5417 pci_reg, 2); 5418 return 0; 5419 } 5420 5421 uint32_t 5422 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5423 uint32_t *reg_value) 5424 { 5425 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5426 pci_reg, 4); 5427 return 0; 5428 } 5429 5430 void 5431 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5432 { 5433 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5434 pci_reg, reg_value, 1); 5435 return; 5436 } 5437 5438 void 5439 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5440 uint16_t reg_value) 5441 { 5442 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5443 pci_reg, reg_value, 2); 5444 return; 5445 } 5446 5447 void 5448 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5449 uint32_t reg_value) 5450 { 5451 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5452 pci_reg, reg_value, 4); 5453 return; 5454 } 5455 5456 int 5457 qlnx_pci_find_capability(void *ecore_dev, int cap) 5458 { 5459 int reg; 5460 qlnx_host_t *ha; 5461 5462 ha = ecore_dev; 5463 5464 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0) 5465 return reg; 5466 else { 5467 QL_DPRINT1(ha, "failed\n"); 5468 return 0; 5469 } 5470 } 5471 5472 int 5473 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5474 { 5475 int reg; 5476 qlnx_host_t *ha; 5477 5478 ha = ecore_dev; 5479 5480 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0) 5481 return reg; 5482 else { 5483 QL_DPRINT1(ha, "failed\n"); 5484 return 0; 5485 } 5486 } 5487 5488 uint32_t 5489 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5490 { 5491 uint32_t data32; 5492 struct ecore_hwfn *p_hwfn; 5493 5494 p_hwfn = hwfn; 5495 5496 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5497 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5498 5499 return (data32); 5500 } 5501 5502 void 5503 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5504 { 5505 struct ecore_hwfn *p_hwfn = hwfn; 5506 5507 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5508 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5509 5510 return; 5511 } 5512 5513 void 5514 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5515 { 5516 struct ecore_hwfn *p_hwfn = hwfn; 5517 5518 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5519 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5520 return; 5521 } 5522 5523 void 5524 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5525 { 5526 struct ecore_dev *cdev; 5527 struct ecore_hwfn *p_hwfn; 5528 uint32_t offset; 5529 5530 p_hwfn = hwfn; 5531 5532 cdev = p_hwfn->p_dev; 5533 5534 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5535 bus_write_4(((qlnx_host_t
*)cdev)->pci_dbells, offset, value); 5536 5537 return; 5538 } 5539 5540 void 5541 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5542 { 5543 struct ecore_hwfn *p_hwfn = hwfn; 5544 5545 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5546 (bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5547 5548 return; 5549 } 5550 5551 uint32_t 5552 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5553 { 5554 uint32_t data32; 5555 bus_size_t offset; 5556 struct ecore_dev *cdev; 5557 5558 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5559 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5560 5561 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5562 5563 return (data32); 5564 } 5565 5566 void 5567 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5568 { 5569 bus_size_t offset; 5570 struct ecore_dev *cdev; 5571 5572 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5573 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5574 5575 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5576 5577 return; 5578 } 5579 5580 void 5581 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5582 { 5583 bus_size_t offset; 5584 struct ecore_dev *cdev; 5585 5586 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5587 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5588 5589 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5590 return; 5591 } 5592 5593 void * 5594 qlnx_zalloc(uint32_t size) 5595 { 5596 caddr_t va; 5597 5598 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5599 bzero(va, size); 5600 return ((void *)va); 5601 } 5602 5603 void 5604 qlnx_barrier(void *p_hwfn) 5605 { 5606 qlnx_host_t *ha; 5607 5608 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5609 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5610 } 5611 5612 void 5613 qlnx_link_update(void *p_hwfn) 5614 { 5615 qlnx_host_t *ha; 5616 int prev_link_state; 5617 5618 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5619 5620 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5621 5622 prev_link_state = ha->link_up; 5623 ha->link_up = ha->if_link.link_up; 5624 5625 if (prev_link_state != ha->link_up) { 5626 if (ha->link_up) { 5627 if_link_state_change(ha->ifp, LINK_STATE_UP); 5628 } else { 5629 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5630 } 5631 } 5632 #ifndef QLNX_VF 5633 #ifdef CONFIG_ECORE_SRIOV 5634 5635 if (qlnx_vf_device(ha) != 0) { 5636 if (ha->sriov_initialized) 5637 qlnx_inform_vf_link_state(p_hwfn, ha); 5638 } 5639 5640 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5641 #endif /* #ifdef QLNX_VF */ 5642 5643 return; 5644 } 5645 5646 static void 5647 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5648 struct ecore_vf_acquire_sw_info *p_sw_info) 5649 { 5650 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5651 (QLNX_VERSION_MINOR << 16) | 5652 QLNX_VERSION_BUILD; 5653 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5654 5655 return; 5656 } 5657 5658 void 5659 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5660 void *p_sw_info) 5661 { 5662 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5663 5664 return; 5665 } 5666 5667 void 5668 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5669 struct qlnx_link_output *if_link) 5670 { 5671 struct ecore_mcp_link_params link_params; 5672 struct ecore_mcp_link_state link_state; 5673 uint8_t p_change; 5674 struct ecore_ptt *p_ptt = NULL; 5675 5676 5677 memset(if_link, 0, 
sizeof(*if_link)); 5678 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5679 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5680 5681 ha = (qlnx_host_t *)hwfn->p_dev; 5682 5683 /* Prepare source inputs */ 5684 /* we only deal with physical functions */ 5685 if (qlnx_vf_device(ha) != 0) { 5686 5687 p_ptt = ecore_ptt_acquire(hwfn); 5688 5689 if (p_ptt == NULL) { 5690 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5691 return; 5692 } 5693 5694 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5695 ecore_ptt_release(hwfn, p_ptt); 5696 5697 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5698 sizeof(link_params)); 5699 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5700 sizeof(link_state)); 5701 } else { 5702 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5703 ecore_vf_read_bulletin(hwfn, &p_change); 5704 ecore_vf_get_link_params(hwfn, &link_params); 5705 ecore_vf_get_link_state(hwfn, &link_state); 5706 } 5707 5708 /* Set the link parameters to pass to protocol driver */ 5709 if (link_state.link_up) { 5710 if_link->link_up = true; 5711 if_link->speed = link_state.speed; 5712 } 5713 5714 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5715 5716 if (link_params.speed.autoneg) 5717 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5718 5719 if (link_params.pause.autoneg || 5720 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5721 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5722 5723 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5724 link_params.pause.forced_tx) 5725 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5726 5727 if (link_params.speed.advertised_speeds & 5728 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5729 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5730 QLNX_LINK_CAP_1000baseT_Full; 5731 5732 if (link_params.speed.advertised_speeds & 5733 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5734 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5735 5736 if (link_params.speed.advertised_speeds & 5737 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5738 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5739 5740 if (link_params.speed.advertised_speeds & 5741 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5742 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5743 5744 if (link_params.speed.advertised_speeds & 5745 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5746 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5747 5748 if (link_params.speed.advertised_speeds & 5749 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5750 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5751 5752 if_link->advertised_caps = if_link->supported_caps; 5753 5754 if_link->autoneg = link_params.speed.autoneg; 5755 if_link->duplex = QLNX_LINK_DUPLEX; 5756 5757 /* Link partner capabilities */ 5758 5759 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5760 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5761 5762 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5763 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5764 5765 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5766 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5767 5768 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5769 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5770 5771 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5772 if_link->link_partner_caps |= 
QLNX_LINK_CAP_40000baseLR4_Full; 5773 5774 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5775 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5776 5777 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5778 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5779 5780 if (link_state.an_complete) 5781 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5782 5783 if (link_state.partner_adv_pause) 5784 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5785 5786 if ((link_state.partner_adv_pause == 5787 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5788 (link_state.partner_adv_pause == 5789 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5790 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5791 5792 return; 5793 } 5794 5795 void 5796 qlnx_schedule_recovery(void *p_hwfn) 5797 { 5798 qlnx_host_t *ha; 5799 5800 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5801 5802 if (qlnx_vf_device(ha) != 0) { 5803 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5804 } 5805 5806 return; 5807 } 5808 5809 static int 5810 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5811 { 5812 int rc, i; 5813 5814 for (i = 0; i < cdev->num_hwfns; i++) { 5815 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5816 p_hwfn->pf_params = *func_params; 5817 5818 #ifdef QLNX_ENABLE_IWARP 5819 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5820 p_hwfn->using_ll2 = true; 5821 } 5822 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5823 5824 } 5825 5826 rc = ecore_resc_alloc(cdev); 5827 if (rc) 5828 goto qlnx_nic_setup_exit; 5829 5830 ecore_resc_setup(cdev); 5831 5832 qlnx_nic_setup_exit: 5833 5834 return rc; 5835 } 5836 5837 static int 5838 qlnx_nic_start(struct ecore_dev *cdev) 5839 { 5840 int rc; 5841 struct ecore_hw_init_params params; 5842 5843 bzero(&params, sizeof (struct ecore_hw_init_params)); 5844 5845 params.p_tunn = NULL; 5846 params.b_hw_start = true; 5847 params.int_mode = cdev->int_mode; 5848 params.allow_npar_tx_switch = true; 5849 params.bin_fw_data = NULL; 5850 5851 rc = ecore_hw_init(cdev, &params); 5852 if (rc) { 5853 ecore_resc_free(cdev); 5854 return rc; 5855 } 5856 5857 return 0; 5858 } 5859 5860 static int 5861 qlnx_slowpath_start(qlnx_host_t *ha) 5862 { 5863 struct ecore_dev *cdev; 5864 struct ecore_pf_params pf_params; 5865 int rc; 5866 5867 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5868 pf_params.eth_pf_params.num_cons = 5869 (ha->num_rss) * (ha->num_tc + 1); 5870 5871 #ifdef QLNX_ENABLE_IWARP 5872 if (qlnx_vf_device(ha) != 0) { 5873 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5874 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5875 pf_params.rdma_pf_params.num_qps = 1024; 5876 pf_params.rdma_pf_params.num_srqs = 1024; 5877 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5878 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5879 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5880 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5881 pf_params.rdma_pf_params.num_qps = 8192; 5882 pf_params.rdma_pf_params.num_srqs = 8192; 5883 //pf_params.rdma_pf_params.min_dpis = 0; 5884 pf_params.rdma_pf_params.min_dpis = 8; 5885 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5886 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5887 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5888 } 5889 } 5890 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5891 5892 cdev = &ha->cdev; 5893 5894 rc = qlnx_nic_setup(cdev, &pf_params); 5895 if (rc) 5896
goto qlnx_slowpath_start_exit; 5897 5898 cdev->int_mode = ECORE_INT_MODE_MSIX; 5899 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5900 5901 #ifdef QLNX_MAX_COALESCE 5902 cdev->rx_coalesce_usecs = 255; 5903 cdev->tx_coalesce_usecs = 255; 5904 #endif 5905 5906 rc = qlnx_nic_start(cdev); 5907 5908 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5909 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5910 5911 #ifdef QLNX_USER_LLDP 5912 (void)qlnx_set_lldp_tlvx(ha, NULL); 5913 #endif /* #ifdef QLNX_USER_LLDP */ 5914 5915 qlnx_slowpath_start_exit: 5916 5917 return (rc); 5918 } 5919 5920 static int 5921 qlnx_slowpath_stop(qlnx_host_t *ha) 5922 { 5923 struct ecore_dev *cdev; 5924 device_t dev = ha->pci_dev; 5925 int i; 5926 5927 cdev = &ha->cdev; 5928 5929 ecore_hw_stop(cdev); 5930 5931 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5932 5933 if (ha->sp_handle[i]) 5934 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5935 ha->sp_handle[i]); 5936 5937 ha->sp_handle[i] = NULL; 5938 5939 if (ha->sp_irq[i]) 5940 (void) bus_release_resource(dev, SYS_RES_IRQ, 5941 ha->sp_irq_rid[i], ha->sp_irq[i]); 5942 ha->sp_irq[i] = NULL; 5943 } 5944 5945 ecore_resc_free(cdev); 5946 5947 return 0; 5948 } 5949 5950 static void 5951 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5952 char ver_str[VER_SIZE]) 5953 { 5954 int i; 5955 5956 memcpy(cdev->name, name, NAME_SIZE); 5957 5958 for_each_hwfn(cdev, i) { 5959 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5960 } 5961 5962 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5963 5964 return ; 5965 } 5966 5967 void 5968 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5969 { 5970 enum ecore_mcp_protocol_type type; 5971 union ecore_mcp_protocol_stats *stats; 5972 struct ecore_eth_stats eth_stats; 5973 qlnx_host_t *ha; 5974 5975 ha = cdev; 5976 stats = proto_stats; 5977 type = proto_type; 5978 5979 switch (type) { 5980 5981 case ECORE_MCP_LAN_STATS: 5982 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); 5983 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5984 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5985 stats->lan_stats.fcs_err = -1; 5986 break; 5987 5988 default: 5989 ha->err_get_proto_invalid_type++; 5990 5991 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5992 break; 5993 } 5994 return; 5995 } 5996 5997 static int 5998 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5999 { 6000 struct ecore_hwfn *p_hwfn; 6001 struct ecore_ptt *p_ptt; 6002 6003 p_hwfn = &ha->cdev.hwfns[0]; 6004 p_ptt = ecore_ptt_acquire(p_hwfn); 6005 6006 if (p_ptt == NULL) { 6007 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 6008 return (-1); 6009 } 6010 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 6011 6012 ecore_ptt_release(p_hwfn, p_ptt); 6013 6014 return (0); 6015 } 6016 6017 static int 6018 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 6019 { 6020 struct ecore_hwfn *p_hwfn; 6021 struct ecore_ptt *p_ptt; 6022 6023 p_hwfn = &ha->cdev.hwfns[0]; 6024 p_ptt = ecore_ptt_acquire(p_hwfn); 6025 6026 if (p_ptt == NULL) { 6027 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 6028 return (-1); 6029 } 6030 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 6031 6032 ecore_ptt_release(p_hwfn, p_ptt); 6033 6034 return (0); 6035 } 6036 6037 static int 6038 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 6039 { 6040 struct ecore_dev *cdev; 6041 6042 cdev = &ha->cdev; 6043 6044 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 6045 bzero(&ha->rxq_array[0], (sizeof(struct
qlnx_rx_queue) * QLNX_MAX_RSS)); 6046 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 6047 6048 return 0; 6049 } 6050 6051 static void 6052 qlnx_init_fp(qlnx_host_t *ha) 6053 { 6054 int rss_id, txq_array_index, tc; 6055 6056 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6057 6058 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6059 6060 fp->rss_id = rss_id; 6061 fp->edev = ha; 6062 fp->sb_info = &ha->sb_array[rss_id]; 6063 fp->rxq = &ha->rxq_array[rss_id]; 6064 fp->rxq->rxq_id = rss_id; 6065 6066 for (tc = 0; tc < ha->num_tc; tc++) { 6067 txq_array_index = tc * ha->num_rss + rss_id; 6068 fp->txq[tc] = &ha->txq_array[txq_array_index]; 6069 fp->txq[tc]->index = txq_array_index; 6070 } 6071 6072 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 6073 rss_id); 6074 6075 fp->tx_ring_full = 0; 6076 6077 /* reset all the statistics counters */ 6078 6079 fp->tx_pkts_processed = 0; 6080 fp->tx_pkts_freed = 0; 6081 fp->tx_pkts_transmitted = 0; 6082 fp->tx_pkts_completed = 0; 6083 6084 #ifdef QLNX_TRACE_PERF_DATA 6085 fp->tx_pkts_trans_ctx = 0; 6086 fp->tx_pkts_compl_ctx = 0; 6087 fp->tx_pkts_trans_fp = 0; 6088 fp->tx_pkts_compl_fp = 0; 6089 fp->tx_pkts_compl_intr = 0; 6090 #endif 6091 fp->tx_lso_wnd_min_len = 0; 6092 fp->tx_defrag = 0; 6093 fp->tx_nsegs_gt_elem_left = 0; 6094 fp->tx_tso_max_nsegs = 0; 6095 fp->tx_tso_min_nsegs = 0; 6096 fp->err_tx_nsegs_gt_elem_left = 0; 6097 fp->err_tx_dmamap_create = 0; 6098 fp->err_tx_defrag_dmamap_load = 0; 6099 fp->err_tx_non_tso_max_seg = 0; 6100 fp->err_tx_dmamap_load = 0; 6101 fp->err_tx_defrag = 0; 6102 fp->err_tx_free_pkt_null = 0; 6103 fp->err_tx_cons_idx_conflict = 0; 6104 6105 fp->rx_pkts = 0; 6106 fp->err_m_getcl = 0; 6107 fp->err_m_getjcl = 0; 6108 } 6109 return; 6110 } 6111 6112 void 6113 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 6114 { 6115 struct ecore_dev *cdev; 6116 6117 cdev = &ha->cdev; 6118 6119 if (sb_info->sb_virt) { 6120 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 6121 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 6122 sb_info->sb_virt = NULL; 6123 } 6124 } 6125 6126 static int 6127 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 6128 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 6129 { 6130 struct ecore_hwfn *p_hwfn; 6131 int hwfn_index, rc; 6132 u16 rel_sb_id; 6133 6134 hwfn_index = sb_id % cdev->num_hwfns; 6135 p_hwfn = &cdev->hwfns[hwfn_index]; 6136 rel_sb_id = sb_id / cdev->num_hwfns; 6137 6138 QL_DPRINT2(((qlnx_host_t *)cdev), 6139 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 6140 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 6141 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 6142 sb_virt_addr, (void *)sb_phy_addr); 6143 6144 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 6145 sb_virt_addr, sb_phy_addr, rel_sb_id); 6146 6147 return rc; 6148 } 6149 6150 /* This function allocates fast-path status block memory */ 6151 int 6152 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 6153 { 6154 struct status_block_e4 *sb_virt; 6155 bus_addr_t sb_phys; 6156 int rc; 6157 uint32_t size; 6158 struct ecore_dev *cdev; 6159 6160 cdev = &ha->cdev; 6161 6162 size = sizeof(*sb_virt); 6163 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 6164 6165 if (!sb_virt) { 6166 QL_DPRINT1(ha, "Status block allocation failed\n"); 6167 return -ENOMEM; 6168 } 6169 6170 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 6171 if (rc) { 6172 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, 
sb_phys, size); 6173 } 6174 6175 return rc; 6176 } 6177 6178 static void 6179 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6180 { 6181 int i; 6182 struct sw_rx_data *rx_buf; 6183 6184 for (i = 0; i < rxq->num_rx_buffers; i++) { 6185 6186 rx_buf = &rxq->sw_rx_ring[i]; 6187 6188 if (rx_buf->data != NULL) { 6189 if (rx_buf->map != NULL) { 6190 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6191 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6192 rx_buf->map = NULL; 6193 } 6194 m_freem(rx_buf->data); 6195 rx_buf->data = NULL; 6196 } 6197 } 6198 return; 6199 } 6200 6201 static void 6202 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6203 { 6204 struct ecore_dev *cdev; 6205 int i; 6206 6207 cdev = &ha->cdev; 6208 6209 qlnx_free_rx_buffers(ha, rxq); 6210 6211 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6212 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 6213 if (rxq->tpa_info[i].mpf != NULL) 6214 m_freem(rxq->tpa_info[i].mpf); 6215 } 6216 6217 bzero((void *)&rxq->sw_rx_ring[0], 6218 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6219 6220 /* Free the real RQ ring used by FW */ 6221 if (rxq->rx_bd_ring.p_virt_addr) { 6222 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6223 rxq->rx_bd_ring.p_virt_addr = NULL; 6224 } 6225 6226 /* Free the real completion ring used by FW */ 6227 if (rxq->rx_comp_ring.p_virt_addr && 6228 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6229 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6230 rxq->rx_comp_ring.p_virt_addr = NULL; 6231 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6232 } 6233 6234 #ifdef QLNX_SOFT_LRO 6235 { 6236 struct lro_ctrl *lro; 6237 6238 lro = &rxq->lro; 6239 tcp_lro_free(lro); 6240 } 6241 #endif /* #ifdef QLNX_SOFT_LRO */ 6242 6243 return; 6244 } 6245 6246 static int 6247 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6248 { 6249 register struct mbuf *mp; 6250 uint16_t rx_buf_size; 6251 struct sw_rx_data *sw_rx_data; 6252 struct eth_rx_bd *rx_bd; 6253 dma_addr_t dma_addr; 6254 bus_dmamap_t map; 6255 bus_dma_segment_t segs[1]; 6256 int nsegs; 6257 int ret; 6258 struct ecore_dev *cdev; 6259 6260 cdev = &ha->cdev; 6261 6262 rx_buf_size = rxq->rx_buf_size; 6263 6264 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6265 6266 if (mp == NULL) { 6267 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6268 return -ENOMEM; 6269 } 6270 6271 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6272 6273 map = (bus_dmamap_t)0; 6274 6275 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6276 BUS_DMA_NOWAIT); 6277 dma_addr = segs[0].ds_addr; 6278 6279 if (ret || !dma_addr || (nsegs != 1)) { 6280 m_freem(mp); 6281 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6282 ret, (long long unsigned int)dma_addr, nsegs); 6283 return -ENOMEM; 6284 } 6285 6286 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6287 sw_rx_data->data = mp; 6288 sw_rx_data->dma_addr = dma_addr; 6289 sw_rx_data->map = map; 6290 6291 /* Advance PROD and get BD pointer */ 6292 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6293 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6294 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6295 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6296 6297 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6298 6299 return 0; 6300 } 6301 6302 static int 6303 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6304 struct qlnx_agg_info *tpa) 6305 { 6306 struct mbuf *mp; 6307 dma_addr_t dma_addr; 6308 bus_dmamap_t map; 6309 bus_dma_segment_t segs[1]; 6310 int nsegs; 6311 int 
ret; 6312 struct sw_rx_data *rx_buf; 6313 6314 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6315 6316 if (mp == NULL) { 6317 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6318 return -ENOMEM; 6319 } 6320 6321 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6322 6323 map = (bus_dmamap_t)0; 6324 6325 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6326 BUS_DMA_NOWAIT); 6327 dma_addr = segs[0].ds_addr; 6328 6329 if (ret || !dma_addr || (nsegs != 1)) { 6330 m_freem(mp); 6331 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6332 ret, (long long unsigned int)dma_addr, nsegs); 6333 return -ENOMEM; 6334 } 6335 6336 rx_buf = &tpa->rx_buf; 6337 6338 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6339 6340 rx_buf->data = mp; 6341 rx_buf->dma_addr = dma_addr; 6342 rx_buf->map = map; 6343 6344 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6345 6346 return (0); 6347 } 6348 6349 static void 6350 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6351 { 6352 struct sw_rx_data *rx_buf; 6353 6354 rx_buf = &tpa->rx_buf; 6355 6356 if (rx_buf->data != NULL) { 6357 if (rx_buf->map != NULL) { 6358 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6359 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6360 rx_buf->map = NULL; 6361 } 6362 m_freem(rx_buf->data); 6363 rx_buf->data = NULL; 6364 } 6365 return; 6366 } 6367 6368 /* This function allocates all memory needed per Rx queue */ 6369 static int 6370 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6371 { 6372 int i, rc, num_allocated; 6373 struct ifnet *ifp; 6374 struct ecore_dev *cdev; 6375 6376 cdev = &ha->cdev; 6377 ifp = ha->ifp; 6378 6379 rxq->num_rx_buffers = RX_RING_SIZE; 6380 6381 rxq->rx_buf_size = ha->rx_buf_size; 6382 6383 /* Allocate the parallel driver ring for Rx buffers */ 6384 bzero((void *)&rxq->sw_rx_ring[0], 6385 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6386 6387 /* Allocate FW Rx ring */ 6388 6389 rc = ecore_chain_alloc(cdev, 6390 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6391 ECORE_CHAIN_MODE_NEXT_PTR, 6392 ECORE_CHAIN_CNT_TYPE_U16, 6393 RX_RING_SIZE, 6394 sizeof(struct eth_rx_bd), 6395 &rxq->rx_bd_ring, NULL); 6396 6397 if (rc) 6398 goto err; 6399 6400 /* Allocate FW completion ring */ 6401 rc = ecore_chain_alloc(cdev, 6402 ECORE_CHAIN_USE_TO_CONSUME, 6403 ECORE_CHAIN_MODE_PBL, 6404 ECORE_CHAIN_CNT_TYPE_U16, 6405 RX_RING_SIZE, 6406 sizeof(union eth_rx_cqe), 6407 &rxq->rx_comp_ring, NULL); 6408 6409 if (rc) 6410 goto err; 6411 6412 /* Allocate buffers for the Rx ring */ 6413 6414 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6415 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6416 &rxq->tpa_info[i]); 6417 if (rc) 6418 break; 6419 6420 } 6421 6422 for (i = 0; i < rxq->num_rx_buffers; i++) { 6423 rc = qlnx_alloc_rx_buffer(ha, rxq); 6424 if (rc) 6425 break; 6426 } 6427 num_allocated = i; 6428 if (!num_allocated) { 6429 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6430 goto err; 6431 } else if (num_allocated < rxq->num_rx_buffers) { 6432 QL_DPRINT1(ha, "Allocated less buffers than" 6433 " desired (%d allocated)\n", num_allocated); 6434 } 6435 6436 #ifdef QLNX_SOFT_LRO 6437 6438 { 6439 struct lro_ctrl *lro; 6440 6441 lro = &rxq->lro; 6442 6443 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 6444 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6445 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6446 rxq->rxq_id); 6447 goto err; 6448 } 6449 #else 6450 if (tcp_lro_init(lro)) { 6451 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6452 rxq->rxq_id); 6453 goto err; 
6454 } 6455 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 6456 6457 lro->ifp = ha->ifp; 6458 } 6459 #endif /* #ifdef QLNX_SOFT_LRO */ 6460 return 0; 6461 6462 err: 6463 qlnx_free_mem_rxq(ha, rxq); 6464 return -ENOMEM; 6465 } 6466 6467 6468 static void 6469 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6470 struct qlnx_tx_queue *txq) 6471 { 6472 struct ecore_dev *cdev; 6473 6474 cdev = &ha->cdev; 6475 6476 bzero((void *)&txq->sw_tx_ring[0], 6477 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6478 6479 /* Free the real RQ ring used by FW */ 6480 if (txq->tx_pbl.p_virt_addr) { 6481 ecore_chain_free(cdev, &txq->tx_pbl); 6482 txq->tx_pbl.p_virt_addr = NULL; 6483 } 6484 return; 6485 } 6486 6487 /* This function allocates all memory needed per Tx queue */ 6488 static int 6489 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6490 struct qlnx_tx_queue *txq) 6491 { 6492 int ret = ECORE_SUCCESS; 6493 union eth_tx_bd_types *p_virt; 6494 struct ecore_dev *cdev; 6495 6496 cdev = &ha->cdev; 6497 6498 bzero((void *)&txq->sw_tx_ring[0], 6499 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6500 6501 /* Allocate the real Tx ring to be used by FW */ 6502 ret = ecore_chain_alloc(cdev, 6503 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6504 ECORE_CHAIN_MODE_PBL, 6505 ECORE_CHAIN_CNT_TYPE_U16, 6506 TX_RING_SIZE, 6507 sizeof(*p_virt), 6508 &txq->tx_pbl, NULL); 6509 6510 if (ret != ECORE_SUCCESS) { 6511 goto err; 6512 } 6513 6514 txq->num_tx_buffers = TX_RING_SIZE; 6515 6516 return 0; 6517 6518 err: 6519 qlnx_free_mem_txq(ha, fp, txq); 6520 return -ENOMEM; 6521 } 6522 6523 static void 6524 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6525 { 6526 struct mbuf *mp; 6527 struct ifnet *ifp = ha->ifp; 6528 6529 if (mtx_initialized(&fp->tx_mtx)) { 6530 6531 if (fp->tx_br != NULL) { 6532 6533 mtx_lock(&fp->tx_mtx); 6534 6535 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6536 fp->tx_pkts_freed++; 6537 m_freem(mp); 6538 } 6539 6540 mtx_unlock(&fp->tx_mtx); 6541 6542 buf_ring_free(fp->tx_br, M_DEVBUF); 6543 fp->tx_br = NULL; 6544 } 6545 mtx_destroy(&fp->tx_mtx); 6546 } 6547 return; 6548 } 6549 6550 static void 6551 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6552 { 6553 int tc; 6554 6555 qlnx_free_mem_sb(ha, fp->sb_info); 6556 6557 qlnx_free_mem_rxq(ha, fp->rxq); 6558 6559 for (tc = 0; tc < ha->num_tc; tc++) 6560 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6561 6562 return; 6563 } 6564 6565 static int 6566 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6567 { 6568 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6569 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6570 6571 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6572 6573 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6574 M_NOWAIT, &fp->tx_mtx); 6575 if (fp->tx_br == NULL) { 6576 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6577 ha->dev_unit, fp->rss_id); 6578 return -ENOMEM; 6579 } 6580 return 0; 6581 } 6582 6583 static int 6584 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6585 { 6586 int rc, tc; 6587 6588 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6589 if (rc) 6590 goto err; 6591 6592 if (ha->rx_jumbo_buf_eq_mtu) { 6593 if (ha->max_frame_size <= MCLBYTES) 6594 ha->rx_buf_size = MCLBYTES; 6595 else if (ha->max_frame_size <= MJUMPAGESIZE) 6596 ha->rx_buf_size = MJUMPAGESIZE; 6597 else if (ha->max_frame_size <= MJUM9BYTES) 6598 ha->rx_buf_size = MJUM9BYTES; 6599 else if (ha->max_frame_size <= MJUM16BYTES) 6600 
ha->rx_buf_size = MJUM16BYTES; 6601 } else { 6602 if (ha->max_frame_size <= MCLBYTES) 6603 ha->rx_buf_size = MCLBYTES; 6604 else 6605 ha->rx_buf_size = MJUMPAGESIZE; 6606 } 6607 6608 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6609 if (rc) 6610 goto err; 6611 6612 for (tc = 0; tc < ha->num_tc; tc++) { 6613 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6614 if (rc) 6615 goto err; 6616 } 6617 6618 return 0; 6619 6620 err: 6621 qlnx_free_mem_fp(ha, fp); 6622 return -ENOMEM; 6623 } 6624 6625 static void 6626 qlnx_free_mem_load(qlnx_host_t *ha) 6627 { 6628 int i; 6629 struct ecore_dev *cdev; 6630 6631 cdev = &ha->cdev; 6632 6633 for (i = 0; i < ha->num_rss; i++) { 6634 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6635 6636 qlnx_free_mem_fp(ha, fp); 6637 } 6638 return; 6639 } 6640 6641 static int 6642 qlnx_alloc_mem_load(qlnx_host_t *ha) 6643 { 6644 int rc = 0, rss_id; 6645 6646 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6647 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6648 6649 rc = qlnx_alloc_mem_fp(ha, fp); 6650 if (rc) 6651 break; 6652 } 6653 return (rc); 6654 } 6655 6656 static int 6657 qlnx_start_vport(struct ecore_dev *cdev, 6658 u8 vport_id, 6659 u16 mtu, 6660 u8 drop_ttl0_flg, 6661 u8 inner_vlan_removal_en_flg, 6662 u8 tx_switching, 6663 u8 hw_lro_enable) 6664 { 6665 int rc, i; 6666 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6667 qlnx_host_t *ha; 6668 6669 ha = (qlnx_host_t *)cdev; 6670 6671 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6672 vport_start_params.tx_switching = 0; 6673 vport_start_params.handle_ptp_pkts = 0; 6674 vport_start_params.only_untagged = 0; 6675 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6676 6677 vport_start_params.tpa_mode = 6678 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6679 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6680 6681 vport_start_params.vport_id = vport_id; 6682 vport_start_params.mtu = mtu; 6683 6684 6685 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6686 6687 for_each_hwfn(cdev, i) { 6688 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6689 6690 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6691 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6692 6693 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6694 6695 if (rc) { 6696 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6697 " with MTU %d\n" , vport_id, mtu); 6698 return -ENOMEM; 6699 } 6700 6701 ecore_hw_start_fastpath(p_hwfn); 6702 6703 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6704 vport_id, mtu); 6705 } 6706 return 0; 6707 } 6708 6709 6710 static int 6711 qlnx_update_vport(struct ecore_dev *cdev, 6712 struct qlnx_update_vport_params *params) 6713 { 6714 struct ecore_sp_vport_update_params sp_params; 6715 int rc, i, j, fp_index; 6716 struct ecore_hwfn *p_hwfn; 6717 struct ecore_rss_params *rss; 6718 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6719 struct qlnx_fastpath *fp; 6720 6721 memset(&sp_params, 0, sizeof(sp_params)); 6722 /* Translate protocol params into sp params */ 6723 sp_params.vport_id = params->vport_id; 6724 6725 sp_params.update_vport_active_rx_flg = 6726 params->update_vport_active_rx_flg; 6727 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6728 6729 sp_params.update_vport_active_tx_flg = 6730 params->update_vport_active_tx_flg; 6731 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6732 6733 sp_params.update_inner_vlan_removal_flg = 6734 params->update_inner_vlan_removal_flg; 6735 
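/*
 * Note on the CMT (multi-hwfn) RSS fix-up performed in the
 * for_each_hwfn() loop below: the upper layer builds a single logical
 * indirection table, but each engine only services every other
 * fastpath, so entry j is remapped to
 * fp_index = ((num_hwfns * j) + i) % num_rss.
 * As an illustration (values are hypothetical, not from a live system):
 * with num_hwfns = 2 and num_rss = 4, engine 0 is given the rxq handles
 * of fastpaths 0,2,0,2,... and engine 1 those of 1,3,1,3,..., so each
 * engine only spreads traffic across queues it actually owns.
 */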
sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6736 6737 sp_params.sge_tpa_params = params->sge_tpa_params; 6738 6739 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6740 * We need to re-fix the rss values per engine for CMT. 6741 */ 6742 if (params->rss_params->update_rss_config) 6743 sp_params.rss_params = params->rss_params; 6744 else 6745 sp_params.rss_params = NULL; 6746 6747 for_each_hwfn(cdev, i) { 6748 6749 p_hwfn = &cdev->hwfns[i]; 6750 6751 if ((cdev->num_hwfns > 1) && 6752 params->rss_params->update_rss_config && 6753 params->rss_params->rss_enable) { 6754 6755 rss = params->rss_params; 6756 6757 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6758 6759 fp_index = ((cdev->num_hwfns * j) + i) % 6760 ha->num_rss; 6761 6762 fp = &ha->fp_array[fp_index]; 6763 rss->rss_ind_table[j] = fp->rxq->handle; 6764 } 6765 6766 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6767 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6768 rss->rss_ind_table[j], 6769 rss->rss_ind_table[j+1], 6770 rss->rss_ind_table[j+2], 6771 rss->rss_ind_table[j+3], 6772 rss->rss_ind_table[j+4], 6773 rss->rss_ind_table[j+5], 6774 rss->rss_ind_table[j+6], 6775 rss->rss_ind_table[j+7]); 6776 j += 8; 6777 } 6778 } 6779 6780 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6781 6782 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6783 6784 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6785 ECORE_SPQ_MODE_EBLOCK, NULL); 6786 if (rc) { 6787 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6788 return rc; 6789 } 6790 6791 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6792 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6793 params->vport_id, params->vport_active_tx_flg, 6794 params->vport_active_rx_flg, 6795 params->update_vport_active_tx_flg, 6796 params->update_vport_active_rx_flg); 6797 } 6798 6799 return 0; 6800 } 6801 6802 static void 6803 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6804 { 6805 struct eth_rx_bd *rx_bd_cons = 6806 ecore_chain_consume(&rxq->rx_bd_ring); 6807 struct eth_rx_bd *rx_bd_prod = 6808 ecore_chain_produce(&rxq->rx_bd_ring); 6809 struct sw_rx_data *sw_rx_data_cons = 6810 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6811 struct sw_rx_data *sw_rx_data_prod = 6812 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6813 6814 sw_rx_data_prod->data = sw_rx_data_cons->data; 6815 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6816 6817 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6818 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6819 6820 return; 6821 } 6822 6823 static void 6824 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6825 { 6826 6827 uint16_t bd_prod; 6828 uint16_t cqe_prod; 6829 union { 6830 struct eth_rx_prod_data rx_prod_data; 6831 uint32_t data32; 6832 } rx_prods; 6833 6834 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6835 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6836 6837 /* Update producers */ 6838 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6839 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6840 6841 /* Make sure that the BD and SGE data is updated before updating the 6842 * producers since FW might read the BD/SGE right after the producer 6843 * is updated. 6844 */ 6845 wmb(); 6846 6847 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6848 sizeof(rx_prods), &rx_prods.data32); 6849 6850 /* mmiowb is needed to synchronize doorbell writes from more than one 6851 * processor. 
It guarantees that the write arrives to the device before 6852 * the napi lock is released and another qlnx_poll is called (possibly 6853 * on another CPU). Without this barrier, the next doorbell can bypass 6854 * this doorbell. This is applicable to IA64/Altix systems. 6855 */ 6856 wmb(); 6857 6858 return; 6859 } 6860 6861 static uint32_t qlnx_hash_key[] = { 6862 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6863 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6864 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6865 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6866 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6867 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6868 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6869 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6870 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6871 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6872 6873 static int 6874 qlnx_start_queues(qlnx_host_t *ha) 6875 { 6876 int rc, tc, i, vport_id = 0, 6877 drop_ttl0_flg = 1, vlan_removal_en = 1, 6878 tx_switching = 0, hw_lro_enable = 0; 6879 struct ecore_dev *cdev = &ha->cdev; 6880 struct ecore_rss_params *rss_params = &ha->rss_params; 6881 struct qlnx_update_vport_params vport_update_params; 6882 struct ifnet *ifp; 6883 struct ecore_hwfn *p_hwfn; 6884 struct ecore_sge_tpa_params tpa_params; 6885 struct ecore_queue_start_common_params qparams; 6886 struct qlnx_fastpath *fp; 6887 6888 ifp = ha->ifp; 6889 6890 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6891 6892 if (!ha->num_rss) { 6893 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6894 " are no Rx queues\n"); 6895 return -EINVAL; 6896 } 6897 6898 #ifndef QLNX_SOFT_LRO 6899 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6900 #endif /* #ifndef QLNX_SOFT_LRO */ 6901 6902 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6903 vlan_removal_en, tx_switching, hw_lro_enable); 6904 6905 if (rc) { 6906 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6907 return rc; 6908 } 6909 6910 QL_DPRINT2(ha, "Start vport ramrod passed, " 6911 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6912 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 6913 6914 for_each_rss(i) { 6915 struct ecore_rxq_start_ret_params rx_ret_params; 6916 struct ecore_txq_start_ret_params tx_ret_params; 6917 6918 fp = &ha->fp_array[i]; 6919 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6920 6921 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6922 bzero(&rx_ret_params, 6923 sizeof (struct ecore_rxq_start_ret_params)); 6924 6925 qparams.queue_id = i ; 6926 qparams.vport_id = vport_id; 6927 qparams.stats_id = vport_id; 6928 qparams.p_sb = fp->sb_info; 6929 qparams.sb_idx = RX_PI; 6930 6931 6932 rc = ecore_eth_rx_queue_start(p_hwfn, 6933 p_hwfn->hw_info.opaque_fid, 6934 &qparams, 6935 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6936 /* bd_chain_phys_addr */ 6937 fp->rxq->rx_bd_ring.p_phys_addr, 6938 /* cqe_pbl_addr */ 6939 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6940 /* cqe_pbl_size */ 6941 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6942 &rx_ret_params); 6943 6944 if (rc) { 6945 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6946 return rc; 6947 } 6948 6949 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6950 fp->rxq->handle = rx_ret_params.p_handle; 6951 fp->rxq->hw_cons_ptr = 6952 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6953 6954 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6955 6956 for (tc = 0; tc < ha->num_tc; tc++) { 6957 struct qlnx_tx_queue *txq = fp->txq[tc]; 6958 6959 bzero(&qparams, 6960 
sizeof(struct ecore_queue_start_common_params)); 6961 bzero(&tx_ret_params, 6962 sizeof (struct ecore_txq_start_ret_params)); 6963 6964 qparams.queue_id = txq->index / cdev->num_hwfns ; 6965 qparams.vport_id = vport_id; 6966 qparams.stats_id = vport_id; 6967 qparams.p_sb = fp->sb_info; 6968 qparams.sb_idx = TX_PI(tc); 6969 6970 rc = ecore_eth_tx_queue_start(p_hwfn, 6971 p_hwfn->hw_info.opaque_fid, 6972 &qparams, tc, 6973 /* bd_chain_phys_addr */ 6974 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6975 ecore_chain_get_page_cnt(&txq->tx_pbl), 6976 &tx_ret_params); 6977 6978 if (rc) { 6979 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6980 txq->index, rc); 6981 return rc; 6982 } 6983 6984 txq->doorbell_addr = tx_ret_params.p_doorbell; 6985 txq->handle = tx_ret_params.p_handle; 6986 6987 txq->hw_cons_ptr = 6988 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6989 SET_FIELD(txq->tx_db.data.params, 6990 ETH_DB_DATA_DEST, DB_DEST_XCM); 6991 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6992 DB_AGG_CMD_SET); 6993 SET_FIELD(txq->tx_db.data.params, 6994 ETH_DB_DATA_AGG_VAL_SEL, 6995 DQ_XCM_ETH_TX_BD_PROD_CMD); 6996 6997 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6998 } 6999 } 7000 7001 /* Fill struct with RSS params */ 7002 if (ha->num_rss > 1) { 7003 7004 rss_params->update_rss_config = 1; 7005 rss_params->rss_enable = 1; 7006 rss_params->update_rss_capabilities = 1; 7007 rss_params->update_rss_ind_table = 1; 7008 rss_params->update_rss_key = 1; 7009 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 7010 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 7011 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 7012 7013 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 7014 fp = &ha->fp_array[(i % ha->num_rss)]; 7015 rss_params->rss_ind_table[i] = fp->rxq->handle; 7016 } 7017 7018 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 7019 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 7020 7021 } else { 7022 memset(rss_params, 0, sizeof(*rss_params)); 7023 } 7024 7025 7026 /* Prepare and send the vport enable */ 7027 memset(&vport_update_params, 0, sizeof(vport_update_params)); 7028 vport_update_params.vport_id = vport_id; 7029 vport_update_params.update_vport_active_tx_flg = 1; 7030 vport_update_params.vport_active_tx_flg = 1; 7031 vport_update_params.update_vport_active_rx_flg = 1; 7032 vport_update_params.vport_active_rx_flg = 1; 7033 vport_update_params.rss_params = rss_params; 7034 vport_update_params.update_inner_vlan_removal_flg = 1; 7035 vport_update_params.inner_vlan_removal_flg = 1; 7036 7037 if (hw_lro_enable) { 7038 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 7039 7040 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 7041 7042 tpa_params.update_tpa_en_flg = 1; 7043 tpa_params.tpa_ipv4_en_flg = 1; 7044 tpa_params.tpa_ipv6_en_flg = 1; 7045 7046 tpa_params.update_tpa_param_flg = 1; 7047 tpa_params.tpa_pkt_split_flg = 0; 7048 tpa_params.tpa_hdr_data_split_flg = 0; 7049 tpa_params.tpa_gro_consistent_flg = 0; 7050 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 7051 tpa_params.tpa_max_size = (uint16_t)(-1); 7052 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 7053 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 7054 7055 vport_update_params.sge_tpa_params = &tpa_params; 7056 } 7057 7058 rc = qlnx_update_vport(cdev, &vport_update_params); 7059 if (rc) { 7060 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 7061 return rc; 7062 } 7063 7064 return 0; 7065 } 7066 7067 static int 7068 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 7069 struct qlnx_tx_queue *txq) 
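/*
 * Drain one Tx queue before stopping it: poll the firmware-written
 * consumer in the status block (txq->hw_cons_ptr) against the local
 * chain consumer index, reaping completions with qlnx_tx_int() under
 * fp->tx_mtx and sleeping 2 ms between polls until the two match.
 * A minimal sketch of the loop shape (simplified, locking elided):
 *
 *	while (le16toh(*txq->hw_cons_ptr) !=
 *	    ecore_chain_get_cons_idx(&txq->tx_pbl)) {
 *		(void)qlnx_tx_int(ha, fp, txq);
 *		qlnx_mdelay(__func__, 2);
 *	}
 *
 * Note the wait is unbounded; the caller relies on firmware eventually
 * completing every posted BD.
 */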
7070 { 7071 uint16_t hw_bd_cons; 7072 uint16_t ecore_cons_idx; 7073 7074 QL_DPRINT2(ha, "enter\n"); 7075 7076 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 7077 7078 while (hw_bd_cons != 7079 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 7080 7081 mtx_lock(&fp->tx_mtx); 7082 7083 (void)qlnx_tx_int(ha, fp, txq); 7084 7085 mtx_unlock(&fp->tx_mtx); 7086 7087 qlnx_mdelay(__func__, 2); 7088 7089 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 7090 } 7091 7092 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 7093 7094 return 0; 7095 } 7096 7097 static int 7098 qlnx_stop_queues(qlnx_host_t *ha) 7099 { 7100 struct qlnx_update_vport_params vport_update_params; 7101 struct ecore_dev *cdev; 7102 struct qlnx_fastpath *fp; 7103 int rc, tc, i; 7104 7105 cdev = &ha->cdev; 7106 7107 /* Disable the vport */ 7108 7109 memset(&vport_update_params, 0, sizeof(vport_update_params)); 7110 7111 vport_update_params.vport_id = 0; 7112 vport_update_params.update_vport_active_tx_flg = 1; 7113 vport_update_params.vport_active_tx_flg = 0; 7114 vport_update_params.update_vport_active_rx_flg = 1; 7115 vport_update_params.vport_active_rx_flg = 0; 7116 vport_update_params.rss_params = &ha->rss_params; 7117 vport_update_params.rss_params->update_rss_config = 0; 7118 vport_update_params.rss_params->rss_enable = 0; 7119 vport_update_params.update_inner_vlan_removal_flg = 0; 7120 vport_update_params.inner_vlan_removal_flg = 0; 7121 7122 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 7123 7124 rc = qlnx_update_vport(cdev, &vport_update_params); 7125 if (rc) { 7126 QL_DPRINT1(ha, "Failed to update vport\n"); 7127 return rc; 7128 } 7129 7130 /* Flush Tx queues. If needed, request drain from MCP */ 7131 for_each_rss(i) { 7132 fp = &ha->fp_array[i]; 7133 7134 for (tc = 0; tc < ha->num_tc; tc++) { 7135 struct qlnx_tx_queue *txq = fp->txq[tc]; 7136 7137 rc = qlnx_drain_txq(ha, fp, txq); 7138 if (rc) 7139 return rc; 7140 } 7141 } 7142 7143 /* Stop all Queues in reverse order*/ 7144 for (i = ha->num_rss - 1; i >= 0; i--) { 7145 7146 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 7147 7148 fp = &ha->fp_array[i]; 7149 7150 /* Stop the Tx Queue(s)*/ 7151 for (tc = 0; tc < ha->num_tc; tc++) { 7152 int tx_queue_id; 7153 7154 tx_queue_id = tc * ha->num_rss + i; 7155 rc = ecore_eth_tx_queue_stop(p_hwfn, 7156 fp->txq[tc]->handle); 7157 7158 if (rc) { 7159 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 7160 tx_queue_id); 7161 return rc; 7162 } 7163 } 7164 7165 /* Stop the Rx Queue*/ 7166 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 7167 false); 7168 if (rc) { 7169 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 7170 return rc; 7171 } 7172 } 7173 7174 /* Stop the vport */ 7175 for_each_hwfn(cdev, i) { 7176 7177 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 7178 7179 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 7180 7181 if (rc) { 7182 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 7183 return rc; 7184 } 7185 } 7186 7187 return rc; 7188 } 7189 7190 static int 7191 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 7192 enum ecore_filter_opcode opcode, 7193 unsigned char mac[ETH_ALEN]) 7194 { 7195 struct ecore_filter_ucast ucast; 7196 struct ecore_dev *cdev; 7197 int rc; 7198 7199 cdev = &ha->cdev; 7200 7201 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7202 7203 ucast.opcode = opcode; 7204 ucast.type = ECORE_FILTER_MAC; 7205 ucast.is_rx_filter = 1; 7206 ucast.vport_to_add_to = 0; 7207 memcpy(&ucast.mac[0], mac, ETH_ALEN); 7208 7209 rc = ecore_filter_ucast_cmd(cdev, &ucast, 
ECORE_SPQ_MODE_CB, NULL); 7210 7211 return (rc); 7212 } 7213 7214 static int 7215 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 7216 { 7217 struct ecore_filter_ucast ucast; 7218 struct ecore_dev *cdev; 7219 int rc; 7220 7221 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7222 7223 ucast.opcode = ECORE_FILTER_REPLACE; 7224 ucast.type = ECORE_FILTER_MAC; 7225 ucast.is_rx_filter = 1; 7226 7227 cdev = &ha->cdev; 7228 7229 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 7230 7231 return (rc); 7232 } 7233 7234 static int 7235 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 7236 { 7237 struct ecore_filter_mcast *mcast; 7238 struct ecore_dev *cdev; 7239 int rc, i; 7240 7241 cdev = &ha->cdev; 7242 7243 mcast = &ha->ecore_mcast; 7244 bzero(mcast, sizeof(struct ecore_filter_mcast)); 7245 7246 mcast->opcode = ECORE_FILTER_REMOVE; 7247 7248 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 7249 7250 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7251 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7252 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7253 7254 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7255 mcast->num_mc_addrs++; 7256 } 7257 } 7258 mcast = &ha->ecore_mcast; 7259 7260 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7261 7262 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7263 ha->nmcast = 0; 7264 7265 return (rc); 7266 } 7267 7268 static int 7269 qlnx_clean_filters(qlnx_host_t *ha) 7270 { 7271 int rc = 0; 7272 7273 /* Remove all unicast macs */ 7274 rc = qlnx_remove_all_ucast_mac(ha); 7275 if (rc) 7276 return rc; 7277 7278 /* Remove all multicast macs */ 7279 rc = qlnx_remove_all_mcast_mac(ha); 7280 if (rc) 7281 return rc; 7282 7283 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7284 7285 return (rc); 7286 } 7287 7288 static int 7289 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7290 { 7291 struct ecore_filter_accept_flags accept; 7292 int rc = 0; 7293 struct ecore_dev *cdev; 7294 7295 cdev = &ha->cdev; 7296 7297 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7298 7299 accept.update_rx_mode_config = 1; 7300 accept.rx_accept_filter = filter; 7301 7302 accept.update_tx_mode_config = 1; 7303 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7304 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7305 7306 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7307 ECORE_SPQ_MODE_CB, NULL); 7308 7309 return (rc); 7310 } 7311 7312 static int 7313 qlnx_set_rx_mode(qlnx_host_t *ha) 7314 { 7315 int rc = 0; 7316 uint8_t filter; 7317 7318 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7319 if (rc) 7320 return rc; 7321 7322 rc = qlnx_remove_all_mcast_mac(ha); 7323 if (rc) 7324 return rc; 7325 7326 filter = ECORE_ACCEPT_UCAST_MATCHED | 7327 ECORE_ACCEPT_MCAST_MATCHED | 7328 ECORE_ACCEPT_BCAST; 7329 7330 if (qlnx_vf_device(ha) == 0) { 7331 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7332 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7333 } 7334 ha->filter = filter; 7335 7336 rc = qlnx_set_rx_accept_filter(ha, filter); 7337 7338 return (rc); 7339 } 7340 7341 static int 7342 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7343 { 7344 int i, rc = 0; 7345 struct ecore_dev *cdev; 7346 struct ecore_hwfn *hwfn; 7347 struct ecore_ptt *ptt; 7348 7349 if (qlnx_vf_device(ha) == 0) 7350 return (0); 7351 7352 cdev = &ha->cdev; 7353 7354 for_each_hwfn(cdev, i) { 7355 7356 hwfn = &cdev->hwfns[i]; 7357 7358 ptt = ecore_ptt_acquire(hwfn); 7359 if (!ptt) 7360 return 
-EBUSY;
7361
7362 rc = ecore_mcp_set_link(hwfn, ptt, link_up);
7363
7364 ecore_ptt_release(hwfn, ptt);
7365
7366 if (rc)
7367 return rc;
7368 }
7369 return (rc);
7370 }
7371
7372 #if __FreeBSD_version >= 1100000
7373 static uint64_t
7374 qlnx_get_counter(if_t ifp, ift_counter cnt)
7375 {
7376 qlnx_host_t *ha;
7377 uint64_t count;
7378
7379 ha = (qlnx_host_t *)if_getsoftc(ifp);
7380
7381 switch (cnt) {
7382
7383 case IFCOUNTER_IPACKETS:
7384 count = ha->hw_stats.common.rx_ucast_pkts +
7385 ha->hw_stats.common.rx_mcast_pkts +
7386 ha->hw_stats.common.rx_bcast_pkts;
7387 break;
7388
7389 case IFCOUNTER_IERRORS:
7390 count = ha->hw_stats.common.rx_crc_errors +
7391 ha->hw_stats.common.rx_align_errors +
7392 ha->hw_stats.common.rx_oversize_packets +
7393 ha->hw_stats.common.rx_undersize_packets;
7394 break;
7395
7396 case IFCOUNTER_OPACKETS:
7397 count = ha->hw_stats.common.tx_ucast_pkts +
7398 ha->hw_stats.common.tx_mcast_pkts +
7399 ha->hw_stats.common.tx_bcast_pkts;
7400 break;
7401
7402 case IFCOUNTER_OERRORS:
7403 count = ha->hw_stats.common.tx_err_drop_pkts;
7404 break;
7405
7406 case IFCOUNTER_COLLISIONS:
7407 return (0);
7408
7409 case IFCOUNTER_IBYTES:
7410 count = ha->hw_stats.common.rx_ucast_bytes +
7411 ha->hw_stats.common.rx_mcast_bytes +
7412 ha->hw_stats.common.rx_bcast_bytes;
7413 break;
7414
7415 case IFCOUNTER_OBYTES:
7416 count = ha->hw_stats.common.tx_ucast_bytes +
7417 ha->hw_stats.common.tx_mcast_bytes +
7418 ha->hw_stats.common.tx_bcast_bytes;
7419 break;
7420
7421 case IFCOUNTER_IMCASTS:
7422 count = ha->hw_stats.common.rx_mcast_pkts;
7423 break;
7424
7425 case IFCOUNTER_OMCASTS:
7426 count = ha->hw_stats.common.tx_mcast_pkts;
7427 break;
7428
7429 case IFCOUNTER_IQDROPS:
7430 case IFCOUNTER_OQDROPS:
7431 case IFCOUNTER_NOPROTO:
7432
7433 default:
7434 return (if_get_counter_default(ifp, cnt));
7435 }
7436 return (count);
7437 }
7438 #endif
7439
7440
7441 static void
7442 qlnx_timer(void *arg)
7443 {
7444 qlnx_host_t *ha;
7445
7446 ha = (qlnx_host_t *)arg;
7447
7448 if (ha->error_recovery) {
7449 ha->error_recovery = 0;
7450 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task);
7451 return;
7452 }
7453
7454 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats);
7455
7456 if (ha->storm_stats_gather)
7457 qlnx_sample_storm_stats(ha);
7458
7459 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha);
7460
7461 return;
7462 }
7463
7464 static int
7465 qlnx_load(qlnx_host_t *ha)
7466 {
7467 int i;
7468 int rc = 0;
7469 struct ecore_dev *cdev;
7470 device_t dev;
7471
7472 cdev = &ha->cdev;
7473 dev = ha->pci_dev;
7474
7475 QL_DPRINT2(ha, "enter\n");
7476
7477 rc = qlnx_alloc_mem_arrays(ha);
7478 if (rc)
7479 goto qlnx_load_exit0;
7480
7481 qlnx_init_fp(ha);
7482
7483 rc = qlnx_alloc_mem_load(ha);
7484 if (rc)
7485 goto qlnx_load_exit1;
7486
7487 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n",
7488 ha->num_rss, ha->num_tc);
7489
7490 for (i = 0; i < ha->num_rss; i++) {
7491
7492 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq,
7493 (INTR_TYPE_NET | INTR_MPSAFE),
7494 NULL, qlnx_fp_isr, &ha->irq_vec[i],
7495 &ha->irq_vec[i].handle))) {
7496
7497 QL_DPRINT1(ha, "could not setup interrupt\n");
7498 goto qlnx_load_exit2;
7499 }
7500
7501 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \
7502 irq %p handle %p\n", i,
7503 ha->irq_vec[i].irq_rid,
7504 ha->irq_vec[i].irq, ha->irq_vec[i].handle);
7505
7506 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus));
7507 }
7508
7509 rc = qlnx_start_queues(ha);
7510 if (rc)
7511 goto qlnx_load_exit2;
7512
7513 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ
succeeded\n"); 7514 7515 /* Add primary mac and set Rx filters */ 7516 rc = qlnx_set_rx_mode(ha); 7517 if (rc) 7518 goto qlnx_load_exit2; 7519 7520 /* Ask for link-up using current configuration */ 7521 qlnx_set_link(ha, true); 7522 7523 if (qlnx_vf_device(ha) == 0) 7524 qlnx_link_update(&ha->cdev.hwfns[0]); 7525 7526 ha->state = QLNX_STATE_OPEN; 7527 7528 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7529 7530 if (ha->flags.callout_init) 7531 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7532 7533 goto qlnx_load_exit0; 7534 7535 qlnx_load_exit2: 7536 qlnx_free_mem_load(ha); 7537 7538 qlnx_load_exit1: 7539 ha->num_rss = 0; 7540 7541 qlnx_load_exit0: 7542 QL_DPRINT2(ha, "exit [%d]\n", rc); 7543 return rc; 7544 } 7545 7546 static void 7547 qlnx_drain_soft_lro(qlnx_host_t *ha) 7548 { 7549 #ifdef QLNX_SOFT_LRO 7550 7551 struct ifnet *ifp; 7552 int i; 7553 7554 ifp = ha->ifp; 7555 7556 7557 if (ifp->if_capenable & IFCAP_LRO) { 7558 7559 for (i = 0; i < ha->num_rss; i++) { 7560 7561 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7562 struct lro_ctrl *lro; 7563 7564 lro = &fp->rxq->lro; 7565 7566 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 7567 7568 tcp_lro_flush_all(lro); 7569 7570 #else 7571 struct lro_entry *queued; 7572 7573 while ((!SLIST_EMPTY(&lro->lro_active))){ 7574 queued = SLIST_FIRST(&lro->lro_active); 7575 SLIST_REMOVE_HEAD(&lro->lro_active, next); 7576 tcp_lro_flush(lro, queued); 7577 } 7578 7579 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 7580 7581 } 7582 } 7583 7584 #endif /* #ifdef QLNX_SOFT_LRO */ 7585 7586 return; 7587 } 7588 7589 static void 7590 qlnx_unload(qlnx_host_t *ha) 7591 { 7592 struct ecore_dev *cdev; 7593 device_t dev; 7594 int i; 7595 7596 cdev = &ha->cdev; 7597 dev = ha->pci_dev; 7598 7599 QL_DPRINT2(ha, "enter\n"); 7600 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7601 7602 if (ha->state == QLNX_STATE_OPEN) { 7603 7604 qlnx_set_link(ha, false); 7605 qlnx_clean_filters(ha); 7606 qlnx_stop_queues(ha); 7607 ecore_hw_stop_fastpath(cdev); 7608 7609 for (i = 0; i < ha->num_rss; i++) { 7610 if (ha->irq_vec[i].handle) { 7611 (void)bus_teardown_intr(dev, 7612 ha->irq_vec[i].irq, 7613 ha->irq_vec[i].handle); 7614 ha->irq_vec[i].handle = NULL; 7615 } 7616 } 7617 7618 qlnx_drain_fp_taskqueues(ha); 7619 qlnx_drain_soft_lro(ha); 7620 qlnx_free_mem_load(ha); 7621 } 7622 7623 if (ha->flags.callout_init) 7624 callout_drain(&ha->qlnx_callout); 7625 7626 qlnx_mdelay(__func__, 1000); 7627 7628 ha->state = QLNX_STATE_CLOSED; 7629 7630 QL_DPRINT2(ha, "exit\n"); 7631 return; 7632 } 7633 7634 static int 7635 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7636 { 7637 int rval = -1; 7638 struct ecore_hwfn *p_hwfn; 7639 struct ecore_ptt *p_ptt; 7640 7641 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7642 7643 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7644 p_ptt = ecore_ptt_acquire(p_hwfn); 7645 7646 if (!p_ptt) { 7647 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7648 return (rval); 7649 } 7650 7651 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7652 7653 if (rval == DBG_STATUS_OK) 7654 rval = 0; 7655 else { 7656 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7657 "[0x%x]\n", rval); 7658 } 7659 7660 ecore_ptt_release(p_hwfn, p_ptt); 7661 7662 return (rval); 7663 } 7664 7665 static int 7666 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7667 { 7668 int rval = -1; 7669 struct ecore_hwfn *p_hwfn; 7670 struct ecore_ptt *p_ptt; 7671 7672 
ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7673 7674 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7675 p_ptt = ecore_ptt_acquire(p_hwfn); 7676 7677 if (!p_ptt) { 7678 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7679 return (rval); 7680 } 7681 7682 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7683 7684 if (rval == DBG_STATUS_OK) 7685 rval = 0; 7686 else { 7687 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7688 " [0x%x]\n", rval); 7689 } 7690 7691 ecore_ptt_release(p_hwfn, p_ptt); 7692 7693 return (rval); 7694 } 7695 7696 7697 static void 7698 qlnx_sample_storm_stats(qlnx_host_t *ha) 7699 { 7700 int i, index; 7701 struct ecore_dev *cdev; 7702 qlnx_storm_stats_t *s_stats; 7703 uint32_t reg; 7704 struct ecore_ptt *p_ptt; 7705 struct ecore_hwfn *hwfn; 7706 7707 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7708 ha->storm_stats_gather = 0; 7709 return; 7710 } 7711 7712 cdev = &ha->cdev; 7713 7714 for_each_hwfn(cdev, i) { 7715 7716 hwfn = &cdev->hwfns[i]; 7717 7718 p_ptt = ecore_ptt_acquire(hwfn); 7719 if (!p_ptt) 7720 return; 7721 7722 index = ha->storm_stats_index + 7723 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7724 7725 s_stats = &ha->storm_stats[index]; 7726 7727 /* XSTORM */ 7728 reg = XSEM_REG_FAST_MEMORY + 7729 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7730 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7731 7732 reg = XSEM_REG_FAST_MEMORY + 7733 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7734 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7735 7736 reg = XSEM_REG_FAST_MEMORY + 7737 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7738 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7739 7740 reg = XSEM_REG_FAST_MEMORY + 7741 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7742 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7743 7744 /* YSTORM */ 7745 reg = YSEM_REG_FAST_MEMORY + 7746 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7747 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7748 7749 reg = YSEM_REG_FAST_MEMORY + 7750 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7751 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7752 7753 reg = YSEM_REG_FAST_MEMORY + 7754 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7755 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7756 7757 reg = YSEM_REG_FAST_MEMORY + 7758 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7759 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7760 7761 /* PSTORM */ 7762 reg = PSEM_REG_FAST_MEMORY + 7763 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7764 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7765 7766 reg = PSEM_REG_FAST_MEMORY + 7767 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7768 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7769 7770 reg = PSEM_REG_FAST_MEMORY + 7771 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7772 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7773 7774 reg = PSEM_REG_FAST_MEMORY + 7775 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7776 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7777 7778 /* TSTORM */ 7779 reg = TSEM_REG_FAST_MEMORY + 7780 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7781 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7782 7783 reg = TSEM_REG_FAST_MEMORY + 7784 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7785 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7786 7787 reg = TSEM_REG_FAST_MEMORY + 7788 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7789 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 
7790 7791 reg = TSEM_REG_FAST_MEMORY + 7792 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7793 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7794 7795 /* MSTORM */ 7796 reg = MSEM_REG_FAST_MEMORY + 7797 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7798 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7799 7800 reg = MSEM_REG_FAST_MEMORY + 7801 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7802 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7803 7804 reg = MSEM_REG_FAST_MEMORY + 7805 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7806 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7807 7808 reg = MSEM_REG_FAST_MEMORY + 7809 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7810 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7811 7812 /* USTORM */ 7813 reg = USEM_REG_FAST_MEMORY + 7814 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7815 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7816 7817 reg = USEM_REG_FAST_MEMORY + 7818 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7819 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7820 7821 reg = USEM_REG_FAST_MEMORY + 7822 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7823 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7824 7825 reg = USEM_REG_FAST_MEMORY + 7826 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7827 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7828 7829 ecore_ptt_release(hwfn, p_ptt); 7830 } 7831 7832 ha->storm_stats_index++; 7833 7834 return; 7835 } 7836 7837 /* 7838 * Name: qlnx_dump_buf8 7839 * Function: dumps a buffer as bytes 7840 */ 7841 static void 7842 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7843 { 7844 device_t dev; 7845 uint32_t i = 0; 7846 uint8_t *buf; 7847 7848 dev = ha->pci_dev; 7849 buf = dbuf; 7850 7851 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7852 7853 while (len >= 16) { 7854 device_printf(dev,"0x%08x:" 7855 " %02x %02x %02x %02x %02x %02x %02x %02x" 7856 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7857 buf[0], buf[1], buf[2], buf[3], 7858 buf[4], buf[5], buf[6], buf[7], 7859 buf[8], buf[9], buf[10], buf[11], 7860 buf[12], buf[13], buf[14], buf[15]); 7861 i += 16; 7862 len -= 16; 7863 buf += 16; 7864 } 7865 switch (len) { 7866 case 1: 7867 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7868 break; 7869 case 2: 7870 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7871 break; 7872 case 3: 7873 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7874 i, buf[0], buf[1], buf[2]); 7875 break; 7876 case 4: 7877 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7878 buf[0], buf[1], buf[2], buf[3]); 7879 break; 7880 case 5: 7881 device_printf(dev,"0x%08x:" 7882 " %02x %02x %02x %02x %02x\n", i, 7883 buf[0], buf[1], buf[2], buf[3], buf[4]); 7884 break; 7885 case 6: 7886 device_printf(dev,"0x%08x:" 7887 " %02x %02x %02x %02x %02x %02x\n", i, 7888 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7889 break; 7890 case 7: 7891 device_printf(dev,"0x%08x:" 7892 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7893 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7894 break; 7895 case 8: 7896 device_printf(dev,"0x%08x:" 7897 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7898 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7899 buf[7]); 7900 break; 7901 case 9: 7902 device_printf(dev,"0x%08x:" 7903 " %02x %02x %02x %02x %02x %02x %02x %02x" 7904 " %02x\n", i, 7905 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7906 buf[7], buf[8]); 7907 break; 7908 case 10: 7909 
device_printf(dev,"0x%08x:" 7910 " %02x %02x %02x %02x %02x %02x %02x %02x" 7911 " %02x %02x\n", i, 7912 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7913 buf[7], buf[8], buf[9]); 7914 break; 7915 case 11: 7916 device_printf(dev,"0x%08x:" 7917 " %02x %02x %02x %02x %02x %02x %02x %02x" 7918 " %02x %02x %02x\n", i, 7919 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7920 buf[7], buf[8], buf[9], buf[10]); 7921 break; 7922 case 12: 7923 device_printf(dev,"0x%08x:" 7924 " %02x %02x %02x %02x %02x %02x %02x %02x" 7925 " %02x %02x %02x %02x\n", i, 7926 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7927 buf[7], buf[8], buf[9], buf[10], buf[11]); 7928 break; 7929 case 13: 7930 device_printf(dev,"0x%08x:" 7931 " %02x %02x %02x %02x %02x %02x %02x %02x" 7932 " %02x %02x %02x %02x %02x\n", i, 7933 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7934 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7935 break; 7936 case 14: 7937 device_printf(dev,"0x%08x:" 7938 " %02x %02x %02x %02x %02x %02x %02x %02x" 7939 " %02x %02x %02x %02x %02x %02x\n", i, 7940 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7941 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7942 buf[13]); 7943 break; 7944 case 15: 7945 device_printf(dev,"0x%08x:" 7946 " %02x %02x %02x %02x %02x %02x %02x %02x" 7947 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7948 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7949 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7950 buf[13], buf[14]); 7951 break; 7952 default: 7953 break; 7954 } 7955 7956 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7957 7958 return; 7959 } 7960 7961 #ifdef CONFIG_ECORE_SRIOV 7962 7963 static void 7964 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7965 { 7966 struct ecore_public_vf_info *vf_info; 7967 7968 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7969 7970 if (!vf_info) 7971 return; 7972 7973 /* Clear the VF mac */ 7974 memset(vf_info->forced_mac, 0, ETH_ALEN); 7975 7976 vf_info->forced_vlan = 0; 7977 7978 return; 7979 } 7980 7981 void 7982 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7983 { 7984 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7985 return; 7986 } 7987 7988 static int 7989 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7990 struct ecore_filter_ucast *params) 7991 { 7992 struct ecore_public_vf_info *vf; 7993 7994 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 7995 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 7996 "VF[%d] vport not initialized\n", vfid); 7997 return ECORE_INVAL; 7998 } 7999 8000 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 8001 if (!vf) 8002 return -EINVAL; 8003 8004 /* No real decision to make; Store the configured MAC */ 8005 if (params->type == ECORE_FILTER_MAC || 8006 params->type == ECORE_FILTER_MAC_VLAN) 8007 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 8008 8009 return 0; 8010 } 8011 8012 int 8013 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 8014 { 8015 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 8016 } 8017 8018 static int 8019 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 8020 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 8021 { 8022 uint8_t mask; 8023 struct ecore_filter_accept_flags *flags; 8024 8025 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 8026 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 8027 "VF[%d] vport not initialized\n", vfid); 8028 return ECORE_INVAL; 8029 } 8030 8031 /* Untrusted VFs can't 
even be trusted to know that fact.
8032 * Simply indicate everything is configured fine, and trace
8033 * configuration 'behind their back'.
8034 */
8035 mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED;
8036 flags = &params->accept_flags;
8037 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM)))
8038 return 0;
8039
8040 return 0;
8041
8042 }
8043 int
8044 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs)
8045 {
8046 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs));
8047 }
8048
8049 static int
8050 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn)
8051 {
8052 int i;
8053 struct ecore_dev *cdev;
8054
8055 cdev = p_hwfn->p_dev;
8056
8057 for (i = 0; i < cdev->num_hwfns; i++) {
8058 if (&cdev->hwfns[i] == p_hwfn)
8059 break;
8060 }
8061
8062 if (i >= cdev->num_hwfns)
8063 return (-1);
8064
8065 return (i);
8066 }
8067
8068 static int
8069 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id)
8070 {
8071 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
8072 int i;
8073
8074 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n",
8075 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id);
8076
8077 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8078 return (-1);
8079
8080 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8081
8082 atomic_testandset_32(&ha->sriov_task[i].flags,
8083 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG);
8084
8085 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
8086 &ha->sriov_task[i].pf_task);
8087
8088 }
8089
8090 return (ECORE_SUCCESS);
8091 }
8092
8093
8094 int
8095 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id)
8096 {
8097 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id));
8098 }
8099
8100 static void
8101 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn)
8102 {
8103 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
8104 int i;
8105
8106 if (!ha->sriov_initialized)
8107 return;
8108
8109 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
8110 ha, p_hwfn->p_dev, p_hwfn);
8111
8112 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8113 return;
8114
8115
8116 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8117
8118 atomic_testandset_32(&ha->sriov_task[i].flags,
8119 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE);
8120
8121 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
8122 &ha->sriov_task[i].pf_task);
8123 }
8124
8125 return;
8126 }
8127
8128
8129 void
8130 qlnx_vf_flr_update(void *p_hwfn)
8131 {
8132 __qlnx_vf_flr_update(p_hwfn);
8133
8134 return;
8135 }
8136
8137 #ifndef QLNX_VF
8138
8139 static void
8140 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn)
8141 {
8142 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev;
8143 int i;
8144
8145 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n",
8146 ha, p_hwfn->p_dev, p_hwfn);
8147
8148 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1)
8149 return;
8150
8151 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n",
8152 ha, p_hwfn->p_dev, p_hwfn, i);
8153
8154 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8155
8156 atomic_testandset_32(&ha->sriov_task[i].flags,
8157 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE);
8158
8159 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue,
8160 &ha->sriov_task[i].pf_task);
8161 }
8162 }
8163
8164 static void
8165 qlnx_initialize_sriov(qlnx_host_t *ha)
8166 {
8167 device_t dev;
8168 nvlist_t *pf_schema, *vf_schema;
8169 int iov_error;
8170
8171 dev = ha->pci_dev;
8172
8173 pf_schema = pci_iov_schema_alloc_node();
8174 vf_schema = pci_iov_schema_alloc_node();
8175
8176 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
8177 pci_iov_schema_add_bool(vf_schema,
"allow-set-mac", 8178 IOV_SCHEMA_HASDEFAULT, FALSE); 8179 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 8180 IOV_SCHEMA_HASDEFAULT, FALSE); 8181 pci_iov_schema_add_uint16(vf_schema, "num-queues", 8182 IOV_SCHEMA_HASDEFAULT, 1); 8183 8184 iov_error = pci_iov_attach(dev, pf_schema, vf_schema); 8185 8186 if (iov_error != 0) { 8187 ha->sriov_initialized = 0; 8188 } else { 8189 device_printf(dev, "SRIOV initialized\n"); 8190 ha->sriov_initialized = 1; 8191 } 8192 8193 return; 8194 } 8195 8196 static void 8197 qlnx_sriov_disable(qlnx_host_t *ha) 8198 { 8199 struct ecore_dev *cdev; 8200 int i, j; 8201 8202 cdev = &ha->cdev; 8203 8204 ecore_iov_set_vfs_to_disable(cdev, true); 8205 8206 8207 for_each_hwfn(cdev, i) { 8208 8209 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 8210 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8211 8212 if (!ptt) { 8213 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8214 return; 8215 } 8216 /* Clean WFQ db and configure equal weight for all vports */ 8217 ecore_clean_wfq_db(hwfn, ptt); 8218 8219 ecore_for_each_vf(hwfn, j) { 8220 int k = 0; 8221 8222 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 8223 continue; 8224 8225 if (ecore_iov_is_vf_started(hwfn, j)) { 8226 /* Wait until VF is disabled before releasing */ 8227 8228 for (k = 0; k < 100; k++) { 8229 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 8230 qlnx_mdelay(__func__, 10); 8231 } else 8232 break; 8233 } 8234 } 8235 8236 if (k < 100) 8237 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 8238 ptt, j); 8239 else { 8240 QL_DPRINT1(ha, 8241 "Timeout waiting for VF's FLR to end\n"); 8242 } 8243 } 8244 ecore_ptt_release(hwfn, ptt); 8245 } 8246 8247 ecore_iov_set_vfs_to_disable(cdev, false); 8248 8249 return; 8250 } 8251 8252 8253 static void 8254 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 8255 struct ecore_iov_vf_init_params *params) 8256 { 8257 u16 base, i; 8258 8259 /* Since we have an equal resource distribution per-VF, and we assume 8260 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 8261 * sequentially from there. 
8262 */
8263 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues;
8264
8265 params->rel_vf_id = vfid;
8266
8267 for (i = 0; i < params->num_queues; i++) {
8268 params->req_rx_queue[i] = base + i;
8269 params->req_tx_queue[i] = base + i;
8270 }
8271
8272 /* PF uses indices 0 for itself; Set vport/RSS afterwards */
8273 params->vport_id = vfid + 1;
8274 params->rss_eng_id = vfid + 1;
8275
8276 return;
8277 }
8278
8279 static int
8280 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params)
8281 {
8282 qlnx_host_t *ha;
8283 struct ecore_dev *cdev;
8284 struct ecore_iov_vf_init_params params;
8285 int ret, j, i;
8286 uint32_t max_vfs;
8287
8288 if ((ha = device_get_softc(dev)) == NULL) {
8289 device_printf(dev, "%s: cannot get softc\n", __func__);
8290 return (-1);
8291 }
8292
8293 if (qlnx_create_pf_taskqueues(ha) != 0)
8294 goto qlnx_iov_init_err0;
8295
8296 cdev = &ha->cdev;
8297
8298 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT);
8299
8300 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n",
8301 dev, num_vfs, max_vfs);
8302
8303 if (num_vfs >= max_vfs) {
8304 QL_DPRINT1(ha, "Can start at most %d VFs\n",
8305 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1));
8306 goto qlnx_iov_init_err0;
8307 }
8308
8309 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF,
8310 M_NOWAIT);
8311
8312 if (ha->vf_attr == NULL)
8313 goto qlnx_iov_init_err0;
8314
8315
8316 memset(&params, 0, sizeof(params));
8317
8318 /* Initialize HW for VF access */
8319 for_each_hwfn(cdev, j) {
8320 struct ecore_hwfn *hwfn = &cdev->hwfns[j];
8321 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn);
8322
8323 /* Make sure not to use more than 16 queues per VF */
8324 params.num_queues = min_t(int,
8325 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs),
8326 16);
8327
8328 if (!ptt) {
8329 QL_DPRINT1(ha, "Failed to acquire ptt\n");
8330 goto qlnx_iov_init_err1;
8331 }
8332
8333 for (i = 0; i < num_vfs; i++) {
8334
8335 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true))
8336 continue;
8337
8338 qlnx_sriov_enable_qid_config(hwfn, i, &params);
8339
8340 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params);
8341
8342 if (ret) {
8343 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i);
8344 ecore_ptt_release(hwfn, ptt);
8345 goto qlnx_iov_init_err1;
8346 }
8347 }
8348
8349 ecore_ptt_release(hwfn, ptt);
8350 }
8351
8352 ha->num_vfs = num_vfs;
8353 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha);
8354
8355 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs);
8356
8357 return (0);
8358
8359 qlnx_iov_init_err1:
8360 qlnx_sriov_disable(ha);
8361
8362 qlnx_iov_init_err0:
8363 qlnx_destroy_pf_taskqueues(ha);
8364 ha->num_vfs = 0;
8365
8366 return (-1);
8367 }
8368
8369 static void
8370 qlnx_iov_uninit(device_t dev)
8371 {
8372 qlnx_host_t *ha;
8373
8374 if ((ha = device_get_softc(dev)) == NULL) {
8375 device_printf(dev, "%s: cannot get softc\n", __func__);
8376 return;
8377 }
8378
8379 QL_DPRINT2(ha," dev = %p enter\n", dev);
8380
8381 qlnx_sriov_disable(ha);
8382 qlnx_destroy_pf_taskqueues(ha);
8383
8384 free(ha->vf_attr, M_QLNXBUF);
8385 ha->vf_attr = NULL;
8386
8387 ha->num_vfs = 0;
8388
8389 QL_DPRINT2(ha," dev = %p exit\n", dev);
8390 return;
8391 }
8392
8393 static int
8394 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
8395 {
8396 qlnx_host_t *ha;
8397 qlnx_vf_attr_t *vf_attr;
8398 unsigned const char *mac;
8399 size_t size;
8400 struct ecore_hwfn *p_hwfn;
8401
8402 if ((ha = device_get_softc(dev)) == NULL) {
8403 device_printf(dev, "%s: cannot get softc\n", __func__);
8404 return (-1); 8405 } 8406 8407 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8408 8409 if (vfnum > (ha->num_vfs - 1)) { 8410 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8411 vfnum, (ha->num_vfs - 1)); 8412 } 8413 8414 vf_attr = &ha->vf_attr[vfnum]; 8415 8416 if (nvlist_exists_binary(params, "mac-addr")) { 8417 mac = nvlist_get_binary(params, "mac-addr", &size); 8418 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8419 device_printf(dev, 8420 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8421 __func__, vf_attr->mac_addr[0], 8422 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8423 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8424 vf_attr->mac_addr[5]); 8425 p_hwfn = &ha->cdev.hwfns[0]; 8426 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8427 vfnum); 8428 } 8429 8430 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8431 return (0); 8432 } 8433 8434 static void 8435 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8436 { 8437 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8438 struct ecore_ptt *ptt; 8439 int i; 8440 8441 ptt = ecore_ptt_acquire(p_hwfn); 8442 if (!ptt) { 8443 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8444 __qlnx_pf_vf_msg(p_hwfn, 0); 8445 return; 8446 } 8447 8448 ecore_iov_pf_get_pending_events(p_hwfn, events); 8449 8450 QL_DPRINT2(ha, "Event mask of VF events:" 8451 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8452 events[0], events[1], events[2]); 8453 8454 ecore_for_each_vf(p_hwfn, i) { 8455 8456 /* Skip VFs with no pending messages */ 8457 if (!(events[i / 64] & (1ULL << (i % 64)))) 8458 continue; 8459 8460 QL_DPRINT2(ha, 8461 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8462 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8463 8464 /* Copy VF's message to PF's request buffer for that VF */ 8465 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8466 continue; 8467 8468 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8469 } 8470 8471 ecore_ptt_release(p_hwfn, ptt); 8472 8473 return; 8474 } 8475 8476 static void 8477 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8478 { 8479 struct ecore_ptt *ptt; 8480 int ret; 8481 8482 ptt = ecore_ptt_acquire(p_hwfn); 8483 8484 if (!ptt) { 8485 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8486 __qlnx_vf_flr_update(p_hwfn); 8487 return; 8488 } 8489 8490 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8491 8492 if (ret) { 8493 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8494 } 8495 8496 ecore_ptt_release(p_hwfn, ptt); 8497 8498 return; 8499 } 8500 8501 static void 8502 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8503 { 8504 struct ecore_ptt *ptt; 8505 int i; 8506 8507 ptt = ecore_ptt_acquire(p_hwfn); 8508 8509 if (!ptt) { 8510 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8511 qlnx_vf_bulleting_update(p_hwfn); 8512 return; 8513 } 8514 8515 ecore_for_each_vf(p_hwfn, i) { 8516 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8517 p_hwfn, i); 8518 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8519 } 8520 8521 ecore_ptt_release(p_hwfn, ptt); 8522 8523 return; 8524 } 8525 8526 static void 8527 qlnx_pf_taskqueue(void *context, int pending) 8528 { 8529 struct ecore_hwfn *p_hwfn; 8530 qlnx_host_t *ha; 8531 int i; 8532 8533 p_hwfn = context; 8534 8535 if (p_hwfn == NULL) 8536 return; 8537 8538 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8539 8540 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8541 return; 8542 8543 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8544 
QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG))
8545 qlnx_handle_vf_msg(ha, p_hwfn);
8546
8547 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8548 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE))
8549 qlnx_handle_vf_flr_update(ha, p_hwfn);
8550
8551 if (atomic_testandclear_32(&ha->sriov_task[i].flags,
8552 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE))
8553 qlnx_handle_bulletin_update(ha, p_hwfn);
8554
8555 return;
8556 }
8557
8558 static int
8559 qlnx_create_pf_taskqueues(qlnx_host_t *ha)
8560 {
8561 int i;
8562 uint8_t tq_name[32];
8563
8564 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8565
8566 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i];
8567
8568 bzero(tq_name, sizeof (tq_name));
8569 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i);
8570
8571 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn);
8572
8573 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT,
8574 taskqueue_thread_enqueue,
8575 &ha->sriov_task[i].pf_taskqueue);
8576
8577 if (ha->sriov_task[i].pf_taskqueue == NULL)
8578 return (-1);
8579
8580 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1,
8581 PI_NET, "%s", tq_name);
8582
8583 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue);
8584 }
8585
8586 return (0);
8587 }
8588
8589 static void
8590 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha)
8591 {
8592 int i;
8593
8594 for (i = 0; i < ha->cdev.num_hwfns; i++) {
8595 if (ha->sriov_task[i].pf_taskqueue != NULL) {
8596 taskqueue_drain(ha->sriov_task[i].pf_taskqueue,
8597 &ha->sriov_task[i].pf_task);
8598 taskqueue_free(ha->sriov_task[i].pf_taskqueue);
8599 ha->sriov_task[i].pf_taskqueue = NULL;
8600 }
8601 }
8602 return;
8603 }
8604
8605 static void
8606 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha)
8607 {
8608 struct ecore_mcp_link_capabilities caps;
8609 struct ecore_mcp_link_params params;
8610 struct ecore_mcp_link_state link;
8611 int i;
8612
8613 if (!p_hwfn->pf_iov_info)
8614 return;
8615
8616 memset(&params, 0, sizeof(struct ecore_mcp_link_params));
8617 memset(&link, 0, sizeof(struct ecore_mcp_link_state));
8618 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities));
8619
8620 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
8621 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
8622 memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
8623
8624 QL_DPRINT2(ha, "called\n");
8625
8626 /* Update bulletin of all future possible VFs with link configuration */
8627 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
8628
8629 /* Modify link according to the VF's configured link state */
8630
8631 link.link_up = false;
8632
8633 if (ha->link_up) {
8634 link.link_up = true;
8635 /* Set speed according to maximum supported by HW.
8636 * that is 40G for regular devices and 100G for CMT
8637 * mode devices.
8638 */
8639 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ?
8640 100000 : link.speed;
8641 }
8642 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up);
8643 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps);
8644 }
8645
8646 qlnx_vf_bulleting_update(p_hwfn);
8647
8648 return;
8649 }
8650 #endif /* #ifndef QLNX_VF */
8651 #endif /* #ifdef CONFIG_ECORE_SRIOV */
8652
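/*
 * Usage note: the VF parameters registered in qlnx_initialize_sriov()
 * ("mac-addr", "allow-set-mac", "allow-promisc", "num-queues") are the
 * knobs an administrator can set through iovctl(8). A hedged example of
 * a matching /etc/iovctl.conf (the "ql0" device name and all values are
 * illustrative only):
 *
 *	PF {
 *		device : "ql0";
 *		num_vfs : 2;
 *	}
 *
 *	DEFAULT {
 *		allow-set-mac : true;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "00:a0:c9:00:00:01";
 *	}
 *
 * Creating the VFs invokes qlnx_iov_init() with num_vfs, and each VF's
 * nvlist is then passed to qlnx_iov_add_vf(), which pushes a configured
 * "mac-addr" to the VF via ecore_iov_bulletin_set_mac().
 */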