1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: qlnx_os.c 30 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "qlnx_os.h" 37 #include "bcm_osal.h" 38 #include "reg_addr.h" 39 #include "ecore_gtt_reg_addr.h" 40 #include "ecore.h" 41 #include "ecore_chain.h" 42 #include "ecore_status.h" 43 #include "ecore_hw.h" 44 #include "ecore_rt_defs.h" 45 #include "ecore_init_ops.h" 46 #include "ecore_int.h" 47 #include "ecore_cxt.h" 48 #include "ecore_spq.h" 49 #include "ecore_init_fw_funcs.h" 50 #include "ecore_sp_commands.h" 51 #include "ecore_dev_api.h" 52 #include "ecore_l2_api.h" 53 #include "ecore_mcp.h" 54 #include "ecore_hw_defs.h" 55 #include "mcp_public.h" 56 #include "ecore_iro.h" 57 #include "nvm_cfg.h" 58 #include "ecore_dev_api.h" 59 #include "ecore_dbg_fw_funcs.h" 60 #include "ecore_iov_api.h" 61 #include "ecore_vf_api.h" 62 63 #include "qlnx_ioctl.h" 64 #include "qlnx_def.h" 65 #include "qlnx_ver.h" 66 67 #ifdef QLNX_ENABLE_IWARP 68 #include "qlnx_rdma.h" 69 #endif /* #ifdef QLNX_ENABLE_IWARP */ 70 71 #include <sys/smp.h> 72 73 /* 74 * static functions 75 */ 76 /* 77 * ioctl related functions 78 */ 79 static void qlnx_add_sysctls(qlnx_host_t *ha); 80 81 /* 82 * main driver 83 */ 84 static void qlnx_release(qlnx_host_t *ha); 85 static void qlnx_fp_isr(void *arg); 86 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 87 static void qlnx_init(void *arg); 88 static void qlnx_init_locked(qlnx_host_t *ha); 89 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 90 static int qlnx_set_promisc(qlnx_host_t *ha); 91 static int qlnx_set_allmulti(qlnx_host_t *ha); 92 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 93 static int qlnx_media_change(struct ifnet *ifp); 94 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 95 static void qlnx_stop(qlnx_host_t *ha); 96 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 97 struct mbuf **m_headp); 98 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 99 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 100 struct qlnx_link_output 
*if_link); 101 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp); 102 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, 103 struct mbuf *mp); 104 static void qlnx_qflush(struct ifnet *ifp); 105 106 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 107 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 108 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 109 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 110 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 111 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 112 113 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 114 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 115 116 static int qlnx_nic_setup(struct ecore_dev *cdev, 117 struct ecore_pf_params *func_params); 118 static int qlnx_nic_start(struct ecore_dev *cdev); 119 static int qlnx_slowpath_start(qlnx_host_t *ha); 120 static int qlnx_slowpath_stop(qlnx_host_t *ha); 121 static int qlnx_init_hw(qlnx_host_t *ha); 122 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 123 char ver_str[VER_SIZE]); 124 static void qlnx_unload(qlnx_host_t *ha); 125 static int qlnx_load(qlnx_host_t *ha); 126 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 127 uint32_t add_mac); 128 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 129 uint32_t len); 130 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 131 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 132 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 133 struct qlnx_rx_queue *rxq); 134 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 135 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 136 int hwfn_index); 137 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 138 int hwfn_index); 139 static void qlnx_timer(void *arg); 140 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 141 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 142 static void qlnx_trigger_dump(qlnx_host_t *ha); 143 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 144 struct qlnx_tx_queue *txq); 145 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 146 struct qlnx_tx_queue *txq); 147 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 148 int lro_enable); 149 static void qlnx_fp_taskqueue(void *context, int pending); 150 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 151 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 152 struct qlnx_agg_info *tpa); 153 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 154 155 #if __FreeBSD_version >= 1100000 156 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 157 #endif 158 159 /* 160 * Hooks to the Operating Systems 161 */ 162 static int qlnx_pci_probe (device_t); 163 static int qlnx_pci_attach (device_t); 164 static int qlnx_pci_detach (device_t); 165 166 #ifndef QLNX_VF 167 168 #ifdef CONFIG_ECORE_SRIOV 169 170 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 171 static void qlnx_iov_uninit(device_t dev); 172 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 173 static void qlnx_initialize_sriov(qlnx_host_t *ha); 174 static void qlnx_pf_taskqueue(void *context, int pending); 175 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha); 176 
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha); 177 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha); 178 179 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 180 181 static device_method_t qlnx_pci_methods[] = { 182 /* Device interface */ 183 DEVMETHOD(device_probe, qlnx_pci_probe), 184 DEVMETHOD(device_attach, qlnx_pci_attach), 185 DEVMETHOD(device_detach, qlnx_pci_detach), 186 187 #ifdef CONFIG_ECORE_SRIOV 188 DEVMETHOD(pci_iov_init, qlnx_iov_init), 189 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit), 190 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf), 191 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 192 { 0, 0 } 193 }; 194 195 static driver_t qlnx_pci_driver = { 196 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 197 }; 198 199 static devclass_t qlnx_devclass; 200 201 MODULE_VERSION(if_qlnxe,1); 202 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, qlnx_devclass, 0, 0); 203 204 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 205 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 206 207 #else 208 209 static device_method_t qlnxv_pci_methods[] = { 210 /* Device interface */ 211 DEVMETHOD(device_probe, qlnx_pci_probe), 212 DEVMETHOD(device_attach, qlnx_pci_attach), 213 DEVMETHOD(device_detach, qlnx_pci_detach), 214 { 0, 0 } 215 }; 216 217 static driver_t qlnxv_pci_driver = { 218 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t), 219 }; 220 221 static devclass_t qlnxv_devclass; 222 MODULE_VERSION(if_qlnxev,1); 223 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, qlnxv_devclass, 0, 0); 224 225 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1); 226 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1); 227 228 #endif /* #ifdef QLNX_VF */ 229 230 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 231 232 char qlnx_dev_str[128]; 233 char qlnx_ver_str[VER_SIZE]; 234 char qlnx_name_str[NAME_SIZE]; 235 236 /* 237 * Some PCI Configuration Space Related Defines 238 */ 239 240 #ifndef PCI_VENDOR_QLOGIC 241 #define PCI_VENDOR_QLOGIC 0x1077 242 #endif 243 244 /* 40G Adapter QLE45xxx*/ 245 #ifndef QLOGIC_PCI_DEVICE_ID_1634 246 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 247 #endif 248 249 /* 100G Adapter QLE45xxx*/ 250 #ifndef QLOGIC_PCI_DEVICE_ID_1644 251 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 252 #endif 253 254 /* 25G Adapter QLE45xxx*/ 255 #ifndef QLOGIC_PCI_DEVICE_ID_1656 256 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 257 #endif 258 259 /* 50G Adapter QLE45xxx*/ 260 #ifndef QLOGIC_PCI_DEVICE_ID_1654 261 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 262 #endif 263 264 /* 10G/25G/40G Adapter QLE41xxx*/ 265 #ifndef QLOGIC_PCI_DEVICE_ID_8070 266 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 267 #endif 268 269 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/ 270 #ifndef QLOGIC_PCI_DEVICE_ID_8090 271 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090 272 #endif 273 274 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 275 "qlnxe driver parameters"); 276 277 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ 278 static int qlnxe_queue_count = QLNX_DEFAULT_RSS; 279 280 #if __FreeBSD_version < 1100000 281 282 TUNABLE_INT("hw.qlnxe.queue_count", &qlnxe_queue_count); 283 284 #endif 285 286 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 287 &qlnxe_queue_count, 0, "Multi-Queue queue count"); 288 289 /* 290 * Note on RDMA personality setting 291 * 292 * Read the personality configured in NVRAM 293 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and 294 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT 295 * use the personality in NVRAM. 
296 297 * Otherwise use t the personality configured in sysctl. 298 * 299 */ 300 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */ 301 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */ 302 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */ 303 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */ 304 #define QLNX_PERSONALITY_BITS_PER_FUNC 4 305 #define QLNX_PERSONALIY_MASK 0xF 306 307 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/ 308 static uint64_t qlnxe_rdma_configuration = 0x22222222; 309 310 #if __FreeBSD_version < 1100000 311 312 TUNABLE_QUAD("hw.qlnxe.rdma_configuration", &qlnxe_rdma_configuration); 313 314 SYSCTL_UQUAD(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 315 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 316 317 #else 318 319 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 320 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 321 322 #endif /* #if __FreeBSD_version < 1100000 */ 323 324 int 325 qlnx_vf_device(qlnx_host_t *ha) 326 { 327 uint16_t device_id; 328 329 device_id = ha->device_id; 330 331 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 332 return 0; 333 334 return -1; 335 } 336 337 static int 338 qlnx_valid_device(qlnx_host_t *ha) 339 { 340 uint16_t device_id; 341 342 device_id = ha->device_id; 343 344 #ifndef QLNX_VF 345 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 346 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 347 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 348 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 349 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 350 return 0; 351 #else 352 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 353 return 0; 354 355 #endif /* #ifndef QLNX_VF */ 356 return -1; 357 } 358 359 #ifdef QLNX_ENABLE_IWARP 360 static int 361 qlnx_rdma_supported(struct qlnx_host *ha) 362 { 363 uint16_t device_id; 364 365 device_id = pci_get_device(ha->pci_dev); 366 367 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 368 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 369 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 370 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 371 return (0); 372 373 return (-1); 374 } 375 #endif /* #ifdef QLNX_ENABLE_IWARP */ 376 377 /* 378 * Name: qlnx_pci_probe 379 * Function: Validate the PCI device to be a QLA80XX device 380 */ 381 static int 382 qlnx_pci_probe(device_t dev) 383 { 384 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 385 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 386 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 387 388 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 389 return (ENXIO); 390 } 391 392 switch (pci_get_device(dev)) { 393 #ifndef QLNX_VF 394 395 case QLOGIC_PCI_DEVICE_ID_1644: 396 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 397 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 398 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 399 QLNX_VERSION_BUILD); 400 device_set_desc_copy(dev, qlnx_dev_str); 401 402 break; 403 404 case QLOGIC_PCI_DEVICE_ID_1634: 405 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 406 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 407 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 408 QLNX_VERSION_BUILD); 409 device_set_desc_copy(dev, qlnx_dev_str); 410 411 break; 412 413 case QLOGIC_PCI_DEVICE_ID_1656: 414 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 415 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 416 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 417 QLNX_VERSION_BUILD); 418 
device_set_desc_copy(dev, qlnx_dev_str); 419 420 break; 421 422 case QLOGIC_PCI_DEVICE_ID_1654: 423 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 424 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 425 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 426 QLNX_VERSION_BUILD); 427 device_set_desc_copy(dev, qlnx_dev_str); 428 429 break; 430 431 case QLOGIC_PCI_DEVICE_ID_8070: 432 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 433 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 434 " Adapter-Ethernet Function", 435 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 436 QLNX_VERSION_BUILD); 437 device_set_desc_copy(dev, qlnx_dev_str); 438 439 break; 440 441 #else 442 case QLOGIC_PCI_DEVICE_ID_8090: 443 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 444 "Qlogic SRIOV PCI CNA (AH) " 445 "Adapter-Ethernet Function", 446 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 447 QLNX_VERSION_BUILD); 448 device_set_desc_copy(dev, qlnx_dev_str); 449 450 break; 451 452 #endif /* #ifndef QLNX_VF */ 453 454 default: 455 return (ENXIO); 456 } 457 458 #ifdef QLNX_ENABLE_IWARP 459 qlnx_rdma_init(); 460 #endif /* #ifdef QLNX_ENABLE_IWARP */ 461 462 return (BUS_PROBE_DEFAULT); 463 } 464 465 static uint16_t 466 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 467 struct qlnx_tx_queue *txq) 468 { 469 u16 hw_bd_cons; 470 u16 ecore_cons_idx; 471 uint16_t diff; 472 473 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 474 475 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 476 if (hw_bd_cons < ecore_cons_idx) { 477 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 478 } else { 479 diff = hw_bd_cons - ecore_cons_idx; 480 } 481 return diff; 482 } 483 484 static void 485 qlnx_sp_intr(void *arg) 486 { 487 struct ecore_hwfn *p_hwfn; 488 qlnx_host_t *ha; 489 int i; 490 491 p_hwfn = arg; 492 493 if (p_hwfn == NULL) { 494 printf("%s: spurious slowpath intr\n", __func__); 495 return; 496 } 497 498 ha = (qlnx_host_t *)p_hwfn->p_dev; 499 500 QL_DPRINT2(ha, "enter\n"); 501 502 for (i = 0; i < ha->cdev.num_hwfns; i++) { 503 if (&ha->cdev.hwfns[i] == p_hwfn) { 504 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 505 break; 506 } 507 } 508 QL_DPRINT2(ha, "exit\n"); 509 510 return; 511 } 512 513 static void 514 qlnx_sp_taskqueue(void *context, int pending) 515 { 516 struct ecore_hwfn *p_hwfn; 517 518 p_hwfn = context; 519 520 if (p_hwfn != NULL) { 521 qlnx_sp_isr(p_hwfn); 522 } 523 return; 524 } 525 526 static int 527 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 528 { 529 int i; 530 uint8_t tq_name[32]; 531 532 for (i = 0; i < ha->cdev.num_hwfns; i++) { 533 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 534 535 bzero(tq_name, sizeof (tq_name)); 536 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 537 538 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 539 540 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 541 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 542 543 if (ha->sp_taskqueue[i] == NULL) 544 return (-1); 545 546 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 547 tq_name); 548 549 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 550 } 551 552 return (0); 553 } 554 555 static void 556 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 557 { 558 int i; 559 560 for (i = 0; i < ha->cdev.num_hwfns; i++) { 561 if (ha->sp_taskqueue[i] != NULL) { 562 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 563 taskqueue_free(ha->sp_taskqueue[i]); 564 } 565 } 566 return; 567 } 568 569 static void 570 qlnx_fp_taskqueue(void *context, int pending) 571 { 572 struct qlnx_fastpath 
*fp; 573 qlnx_host_t *ha; 574 struct ifnet *ifp; 575 576 fp = context; 577 578 if (fp == NULL) 579 return; 580 581 ha = (qlnx_host_t *)fp->edev; 582 583 ifp = ha->ifp; 584 585 if(ifp->if_drv_flags & IFF_DRV_RUNNING) { 586 if (!drbr_empty(ifp, fp->tx_br)) { 587 if(mtx_trylock(&fp->tx_mtx)) { 588 #ifdef QLNX_TRACE_PERF_DATA 589 tx_pkts = fp->tx_pkts_transmitted; 590 tx_compl = fp->tx_pkts_completed; 591 #endif 592 593 qlnx_transmit_locked(ifp, fp, NULL); 594 595 #ifdef QLNX_TRACE_PERF_DATA 596 fp->tx_pkts_trans_fp += 597 (fp->tx_pkts_transmitted - tx_pkts); 598 fp->tx_pkts_compl_fp += 599 (fp->tx_pkts_completed - tx_compl); 600 #endif 601 mtx_unlock(&fp->tx_mtx); 602 } 603 } 604 } 605 606 QL_DPRINT2(ha, "exit \n"); 607 return; 608 } 609 610 static int 611 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 612 { 613 int i; 614 uint8_t tq_name[32]; 615 struct qlnx_fastpath *fp; 616 617 for (i = 0; i < ha->num_rss; i++) { 618 fp = &ha->fp_array[i]; 619 620 bzero(tq_name, sizeof (tq_name)); 621 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 622 623 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 624 625 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 626 taskqueue_thread_enqueue, 627 &fp->fp_taskqueue); 628 629 if (fp->fp_taskqueue == NULL) 630 return (-1); 631 632 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 633 tq_name); 634 635 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 636 } 637 638 return (0); 639 } 640 641 static void 642 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 643 { 644 int i; 645 struct qlnx_fastpath *fp; 646 647 for (i = 0; i < ha->num_rss; i++) { 648 fp = &ha->fp_array[i]; 649 650 if (fp->fp_taskqueue != NULL) { 651 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 652 taskqueue_free(fp->fp_taskqueue); 653 fp->fp_taskqueue = NULL; 654 } 655 } 656 return; 657 } 658 659 static void 660 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 661 { 662 int i; 663 struct qlnx_fastpath *fp; 664 665 for (i = 0; i < ha->num_rss; i++) { 666 fp = &ha->fp_array[i]; 667 668 if (fp->fp_taskqueue != NULL) { 669 QLNX_UNLOCK(ha); 670 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 671 QLNX_LOCK(ha); 672 } 673 } 674 return; 675 } 676 677 static void 678 qlnx_get_params(qlnx_host_t *ha) 679 { 680 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 681 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 682 qlnxe_queue_count); 683 qlnxe_queue_count = 0; 684 } 685 return; 686 } 687 688 static void 689 qlnx_error_recovery_taskqueue(void *context, int pending) 690 { 691 qlnx_host_t *ha; 692 693 ha = context; 694 695 QL_DPRINT2(ha, "enter\n"); 696 697 QLNX_LOCK(ha); 698 qlnx_stop(ha); 699 QLNX_UNLOCK(ha); 700 701 #ifdef QLNX_ENABLE_IWARP 702 qlnx_rdma_dev_remove(ha); 703 #endif /* #ifdef QLNX_ENABLE_IWARP */ 704 705 qlnx_slowpath_stop(ha); 706 qlnx_slowpath_start(ha); 707 708 #ifdef QLNX_ENABLE_IWARP 709 qlnx_rdma_dev_add(ha); 710 #endif /* #ifdef QLNX_ENABLE_IWARP */ 711 712 qlnx_init(ha); 713 714 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 715 716 QL_DPRINT2(ha, "exit\n"); 717 718 return; 719 } 720 721 static int 722 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 723 { 724 uint8_t tq_name[32]; 725 726 bzero(tq_name, sizeof (tq_name)); 727 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 728 729 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 730 731 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 732 taskqueue_thread_enqueue, &ha->err_taskqueue); 733 734 if (ha->err_taskqueue == NULL) 735 return (-1); 736 737 
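        /*
         * Start the single worker thread for the error-recovery taskqueue.
         * When ha->err_task is queued it runs qlnx_error_recovery_taskqueue()
         * above: stop the port, restart the slowpath and re-init the interface.
         */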
taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 738 739 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 740 741 return (0); 742 } 743 744 static void 745 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 746 { 747 if (ha->err_taskqueue != NULL) { 748 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 749 taskqueue_free(ha->err_taskqueue); 750 } 751 752 ha->err_taskqueue = NULL; 753 754 return; 755 } 756 757 /* 758 * Name: qlnx_pci_attach 759 * Function: attaches the device to the operating system 760 */ 761 static int 762 qlnx_pci_attach(device_t dev) 763 { 764 qlnx_host_t *ha = NULL; 765 uint32_t rsrc_len_reg = 0; 766 uint32_t rsrc_len_dbells = 0; 767 uint32_t rsrc_len_msix = 0; 768 int i; 769 uint32_t mfw_ver; 770 uint32_t num_sp_msix = 0; 771 uint32_t num_rdma_irqs = 0; 772 773 if ((ha = device_get_softc(dev)) == NULL) { 774 device_printf(dev, "cannot get softc\n"); 775 return (ENOMEM); 776 } 777 778 memset(ha, 0, sizeof (qlnx_host_t)); 779 780 ha->device_id = pci_get_device(dev); 781 782 if (qlnx_valid_device(ha) != 0) { 783 device_printf(dev, "device is not valid device\n"); 784 return (ENXIO); 785 } 786 ha->pci_func = pci_get_function(dev); 787 788 ha->pci_dev = dev; 789 790 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 791 792 ha->flags.lock_init = 1; 793 794 pci_enable_busmaster(dev); 795 796 /* 797 * map the PCI BARs 798 */ 799 800 ha->reg_rid = PCIR_BAR(0); 801 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 802 RF_ACTIVE); 803 804 if (ha->pci_reg == NULL) { 805 device_printf(dev, "unable to map BAR0\n"); 806 goto qlnx_pci_attach_err; 807 } 808 809 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 810 ha->reg_rid); 811 812 ha->dbells_rid = PCIR_BAR(2); 813 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, 814 SYS_RES_MEMORY, 815 ha->dbells_rid); 816 if (rsrc_len_dbells) { 817 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 818 &ha->dbells_rid, RF_ACTIVE); 819 820 if (ha->pci_dbells == NULL) { 821 device_printf(dev, "unable to map BAR1\n"); 822 goto qlnx_pci_attach_err; 823 } 824 ha->dbells_phys_addr = (uint64_t) 825 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 826 827 ha->dbells_size = rsrc_len_dbells; 828 } else { 829 if (qlnx_vf_device(ha) != 0) { 830 device_printf(dev, " BAR1 size is zero\n"); 831 goto qlnx_pci_attach_err; 832 } 833 } 834 835 ha->msix_rid = PCIR_BAR(4); 836 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 837 &ha->msix_rid, RF_ACTIVE); 838 839 if (ha->msix_bar == NULL) { 840 device_printf(dev, "unable to map BAR2\n"); 841 goto qlnx_pci_attach_err; 842 } 843 844 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 845 ha->msix_rid); 846 847 ha->dbg_level = 0x0000; 848 849 QL_DPRINT1(ha, "\n\t\t\t" 850 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 851 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 852 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 853 " msix_avail = 0x%x " 854 "\n\t\t\t[ncpus = %d]\n", 855 ha->pci_dev, ha->pci_reg, rsrc_len_reg, 856 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 857 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 858 mp_ncpus); 859 /* 860 * allocate dma tags 861 */ 862 863 if (qlnx_alloc_parent_dma_tag(ha)) 864 goto qlnx_pci_attach_err; 865 866 if (qlnx_alloc_tx_dma_tag(ha)) 867 goto qlnx_pci_attach_err; 868 869 if (qlnx_alloc_rx_dma_tag(ha)) 870 goto qlnx_pci_attach_err; 871 872 873 if (qlnx_init_hw(ha) != 
0) 874 goto qlnx_pci_attach_err; 875 876 ha->flags.hw_init = 1; 877 878 qlnx_get_params(ha); 879 880 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && 881 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { 882 qlnxe_queue_count = QLNX_MAX_RSS; 883 } 884 885 /* 886 * Allocate MSI-x vectors 887 */ 888 if (qlnx_vf_device(ha) != 0) { 889 if (qlnxe_queue_count == 0) 890 ha->num_rss = QLNX_DEFAULT_RSS; 891 else 892 ha->num_rss = qlnxe_queue_count; 893 894 num_sp_msix = ha->cdev.num_hwfns; 895 } else { 896 uint8_t max_rxq; 897 uint8_t max_txq; 898 899 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 900 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 901 902 if (max_rxq < max_txq) 903 ha->num_rss = max_rxq; 904 else 905 ha->num_rss = max_txq; 906 907 if (ha->num_rss > QLNX_MAX_VF_RSS) 908 ha->num_rss = QLNX_MAX_VF_RSS; 909 910 num_sp_msix = 0; 911 } 912 913 if (ha->num_rss > mp_ncpus) 914 ha->num_rss = mp_ncpus; 915 916 ha->num_tc = QLNX_MAX_TC; 917 918 ha->msix_count = pci_msix_count(dev); 919 920 #ifdef QLNX_ENABLE_IWARP 921 922 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 923 924 #endif /* #ifdef QLNX_ENABLE_IWARP */ 925 926 if (!ha->msix_count || 927 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 928 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 929 ha->msix_count); 930 goto qlnx_pci_attach_err; 931 } 932 933 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 934 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 935 else 936 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 937 938 QL_DPRINT1(ha, "\n\t\t\t" 939 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 940 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 941 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 942 " msix_avail = 0x%x msix_alloc = 0x%x" 943 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 944 ha->pci_reg, rsrc_len_reg, 945 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 946 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 947 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 948 949 if (pci_alloc_msix(dev, &ha->msix_count)) { 950 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 951 ha->msix_count); 952 ha->msix_count = 0; 953 goto qlnx_pci_attach_err; 954 } 955 956 /* 957 * Initialize slow path interrupt and task queue 958 */ 959 960 if (num_sp_msix) { 961 if (qlnx_create_sp_taskqueues(ha) != 0) 962 goto qlnx_pci_attach_err; 963 964 for (i = 0; i < ha->cdev.num_hwfns; i++) { 965 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 966 967 ha->sp_irq_rid[i] = i + 1; 968 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 969 &ha->sp_irq_rid[i], 970 (RF_ACTIVE | RF_SHAREABLE)); 971 if (ha->sp_irq[i] == NULL) { 972 device_printf(dev, 973 "could not allocate mbx interrupt\n"); 974 goto qlnx_pci_attach_err; 975 } 976 977 if (bus_setup_intr(dev, ha->sp_irq[i], 978 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 979 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 980 device_printf(dev, 981 "could not setup slow path interrupt\n"); 982 goto qlnx_pci_attach_err; 983 } 984 985 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 986 " sp_irq %p sp_handle %p\n", p_hwfn, 987 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 988 } 989 } 990 991 /* 992 * initialize fast path interrupt 993 */ 994 if (qlnx_create_fp_taskqueues(ha) != 0) 995 goto qlnx_pci_attach_err; 996 997 for (i = 0; i < ha->num_rss; i++) { 998 ha->irq_vec[i].rss_idx = i; 999 ha->irq_vec[i].ha = ha; 1000 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) 
+ i; 1001 1002 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 1003 &ha->irq_vec[i].irq_rid, 1004 (RF_ACTIVE | RF_SHAREABLE)); 1005 1006 if (ha->irq_vec[i].irq == NULL) { 1007 device_printf(dev, 1008 "could not allocate interrupt[%d] irq_rid = %d\n", 1009 i, ha->irq_vec[i].irq_rid); 1010 goto qlnx_pci_attach_err; 1011 } 1012 1013 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 1014 device_printf(dev, "could not allocate tx_br[%d]\n", i); 1015 goto qlnx_pci_attach_err; 1016 } 1017 } 1018 1019 if (qlnx_vf_device(ha) != 0) { 1020 callout_init(&ha->qlnx_callout, 1); 1021 ha->flags.callout_init = 1; 1022 1023 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1024 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1025 goto qlnx_pci_attach_err; 1026 if (ha->grcdump_size[i] == 0) 1027 goto qlnx_pci_attach_err; 1028 1029 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1030 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1031 i, ha->grcdump_size[i]); 1032 1033 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1034 if (ha->grcdump[i] == NULL) { 1035 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1036 goto qlnx_pci_attach_err; 1037 } 1038 1039 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1040 goto qlnx_pci_attach_err; 1041 if (ha->idle_chk_size[i] == 0) 1042 goto qlnx_pci_attach_err; 1043 1044 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1045 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1046 i, ha->idle_chk_size[i]); 1047 1048 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1049 1050 if (ha->idle_chk[i] == NULL) { 1051 device_printf(dev, "idle_chk alloc failed\n"); 1052 goto qlnx_pci_attach_err; 1053 } 1054 } 1055 1056 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1057 goto qlnx_pci_attach_err; 1058 } 1059 1060 if (qlnx_slowpath_start(ha) != 0) 1061 goto qlnx_pci_attach_err; 1062 else 1063 ha->flags.slowpath_start = 1; 1064 1065 if (qlnx_vf_device(ha) != 0) { 1066 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1067 qlnx_mdelay(__func__, 1000); 1068 qlnx_trigger_dump(ha); 1069 1070 goto qlnx_pci_attach_err0; 1071 } 1072 1073 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1074 qlnx_mdelay(__func__, 1000); 1075 qlnx_trigger_dump(ha); 1076 1077 goto qlnx_pci_attach_err0; 1078 } 1079 } else { 1080 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1081 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1082 } 1083 1084 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1085 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1086 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1087 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1088 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1089 FW_ENGINEERING_VERSION); 1090 1091 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1092 ha->stormfw_ver, ha->mfw_ver); 1093 1094 qlnx_init_ifnet(dev, ha); 1095 1096 /* 1097 * add sysctls 1098 */ 1099 qlnx_add_sysctls(ha); 1100 1101 qlnx_pci_attach_err0: 1102 /* 1103 * create ioctl device interface 1104 */ 1105 if (qlnx_vf_device(ha) != 0) { 1106 if (qlnx_make_cdev(ha)) { 1107 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1108 goto qlnx_pci_attach_err; 1109 } 1110 1111 #ifdef QLNX_ENABLE_IWARP 1112 qlnx_rdma_dev_add(ha); 1113 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1114 } 1115 1116 #ifndef QLNX_VF 1117 #ifdef CONFIG_ECORE_SRIOV 1118 1119 if (qlnx_vf_device(ha) != 0) 1120 qlnx_initialize_sriov(ha); 1121 1122 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1123 #endif /* #ifdef QLNX_VF */ 1124 1125 QL_DPRINT2(ha, "success\n"); 1126 1127 
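        /*
         * Attach completed successfully; any failure above unwinds through
         * the qlnx_pci_attach_err label below, which frees all resources via
         * qlnx_release().
         */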
return (0); 1128 1129 qlnx_pci_attach_err: 1130 1131 qlnx_release(ha); 1132 1133 return (ENXIO); 1134 } 1135 1136 /* 1137 * Name: qlnx_pci_detach 1138 * Function: Unhooks the device from the operating system 1139 */ 1140 static int 1141 qlnx_pci_detach(device_t dev) 1142 { 1143 qlnx_host_t *ha = NULL; 1144 1145 if ((ha = device_get_softc(dev)) == NULL) { 1146 device_printf(dev, "%s: cannot get softc\n", __func__); 1147 return (ENOMEM); 1148 } 1149 1150 if (qlnx_vf_device(ha) != 0) { 1151 #ifdef CONFIG_ECORE_SRIOV 1152 int ret; 1153 1154 ret = pci_iov_detach(dev); 1155 if (ret) { 1156 device_printf(dev, "%s: SRIOV in use\n", __func__); 1157 return (ret); 1158 } 1159 1160 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1161 1162 #ifdef QLNX_ENABLE_IWARP 1163 if (qlnx_rdma_dev_remove(ha) != 0) 1164 return (EBUSY); 1165 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1166 } 1167 1168 QLNX_LOCK(ha); 1169 qlnx_stop(ha); 1170 QLNX_UNLOCK(ha); 1171 1172 qlnx_release(ha); 1173 1174 return (0); 1175 } 1176 1177 #ifdef QLNX_ENABLE_IWARP 1178 1179 static uint8_t 1180 qlnx_get_personality(uint8_t pci_func) 1181 { 1182 uint8_t personality; 1183 1184 personality = (qlnxe_rdma_configuration >> 1185 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & 1186 QLNX_PERSONALIY_MASK; 1187 return (personality); 1188 } 1189 1190 static void 1191 qlnx_set_personality(qlnx_host_t *ha) 1192 { 1193 struct ecore_hwfn *p_hwfn; 1194 uint8_t personality; 1195 1196 p_hwfn = &ha->cdev.hwfns[0]; 1197 1198 personality = qlnx_get_personality(ha->pci_func); 1199 1200 switch (personality) { 1201 case QLNX_PERSONALITY_DEFAULT: 1202 device_printf(ha->pci_dev, "%s: DEFAULT\n", 1203 __func__); 1204 ha->personality = ECORE_PCI_DEFAULT; 1205 break; 1206 1207 case QLNX_PERSONALITY_ETH_ONLY: 1208 device_printf(ha->pci_dev, "%s: ETH_ONLY\n", 1209 __func__); 1210 ha->personality = ECORE_PCI_ETH; 1211 break; 1212 1213 case QLNX_PERSONALITY_ETH_IWARP: 1214 device_printf(ha->pci_dev, "%s: ETH_IWARP\n", 1215 __func__); 1216 ha->personality = ECORE_PCI_ETH_IWARP; 1217 break; 1218 1219 case QLNX_PERSONALITY_ETH_ROCE: 1220 device_printf(ha->pci_dev, "%s: ETH_ROCE\n", 1221 __func__); 1222 ha->personality = ECORE_PCI_ETH_ROCE; 1223 break; 1224 } 1225 1226 return; 1227 } 1228 1229 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1230 1231 static int 1232 qlnx_init_hw(qlnx_host_t *ha) 1233 { 1234 int rval = 0; 1235 struct ecore_hw_prepare_params params; 1236 1237 ecore_init_struct(&ha->cdev); 1238 1239 /* ha->dp_module = ECORE_MSG_PROBE | 1240 ECORE_MSG_INTR | 1241 ECORE_MSG_SP | 1242 ECORE_MSG_LINK | 1243 ECORE_MSG_SPQ | 1244 ECORE_MSG_RDMA; 1245 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 1246 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; 1247 ha->dp_level = ECORE_LEVEL_NOTICE; 1248 //ha->dp_level = ECORE_LEVEL_VERBOSE; 1249 1250 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 1251 1252 ha->cdev.regview = ha->pci_reg; 1253 1254 ha->personality = ECORE_PCI_DEFAULT; 1255 1256 if (qlnx_vf_device(ha) == 0) { 1257 ha->cdev.b_is_vf = true; 1258 1259 if (ha->pci_dbells != NULL) { 1260 ha->cdev.doorbells = ha->pci_dbells; 1261 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1262 ha->cdev.db_size = ha->dbells_size; 1263 } else { 1264 ha->pci_dbells = ha->pci_reg; 1265 } 1266 } else { 1267 ha->cdev.doorbells = ha->pci_dbells; 1268 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1269 ha->cdev.db_size = ha->dbells_size; 1270 1271 #ifdef QLNX_ENABLE_IWARP 1272 1273 if (qlnx_rdma_supported(ha) == 0) 1274 qlnx_set_personality(ha); 1275 1276 #endif /* #ifdef 
QLNX_ENABLE_IWARP */
        }

        QL_DPRINT2(ha, "%s: %s\n", __func__,
                (ha->personality == ECORE_PCI_ETH_IWARP ? "iwarp": "ethernet"));

        bzero(&params, sizeof (struct ecore_hw_prepare_params));

        params.personality = ha->personality;

        params.drv_resc_alloc = false;
        params.chk_reg_fifo = false;
        params.initiate_pf_flr = true;
        params.epoch = 0;

        ecore_hw_prepare(&ha->cdev, &params);

        qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

        QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
                ha, &ha->cdev, &ha->cdev.hwfns[0]);

        return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
        device_t        dev;
        int             i;

        dev = ha->pci_dev;

        QL_DPRINT2(ha, "enter\n");

        for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
                if (ha->idle_chk[i] != NULL) {
                        free(ha->idle_chk[i], M_QLNXBUF);
                        ha->idle_chk[i] = NULL;
                }

                if (ha->grcdump[i] != NULL) {
                        free(ha->grcdump[i], M_QLNXBUF);
                        ha->grcdump[i] = NULL;
                }
        }

        if (ha->flags.callout_init)
                callout_drain(&ha->qlnx_callout);

        if (ha->flags.slowpath_start) {
                qlnx_slowpath_stop(ha);
        }

        if (ha->flags.hw_init)
                ecore_hw_remove(&ha->cdev);

        qlnx_del_cdev(ha);

        if (ha->ifp != NULL)
                ether_ifdetach(ha->ifp);

        qlnx_free_tx_dma_tag(ha);

        qlnx_free_rx_dma_tag(ha);

        qlnx_free_parent_dma_tag(ha);

        if (qlnx_vf_device(ha) != 0) {
                qlnx_destroy_error_recovery_taskqueue(ha);
        }

        for (i = 0; i < ha->num_rss; i++) {
                struct qlnx_fastpath *fp = &ha->fp_array[i];

                if (ha->irq_vec[i].handle) {
                        (void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
                                ha->irq_vec[i].handle);
                }

                if (ha->irq_vec[i].irq) {
                        (void)bus_release_resource(dev, SYS_RES_IRQ,
                                ha->irq_vec[i].irq_rid,
                                ha->irq_vec[i].irq);
                }

                qlnx_free_tx_br(ha, fp);
        }
        qlnx_destroy_fp_taskqueues(ha);

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
                if (ha->sp_handle[i])
                        (void)bus_teardown_intr(dev, ha->sp_irq[i],
                                ha->sp_handle[i]);

                if (ha->sp_irq[i])
                        (void) bus_release_resource(dev, SYS_RES_IRQ,
                                ha->sp_irq_rid[i], ha->sp_irq[i]);
        }

        qlnx_destroy_sp_taskqueues(ha);

        if (ha->msix_count)
                pci_release_msi(dev);

        if (ha->flags.lock_init) {
                mtx_destroy(&ha->hw_lock);
        }

        if (ha->pci_reg)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
                        ha->pci_reg);

        if (ha->dbells_size && ha->pci_dbells)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
                        ha->pci_dbells);

        if (ha->msix_bar)
                (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
                        ha->msix_bar);

        QL_DPRINT2(ha, "exit\n");
        return;
}

static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
        int i;

        if (ha->ifp != NULL)
                ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

        QL_DPRINT2(ha, "enter\n");

        if (qlnx_vf_device(ha) == 0)
                return;

        ha->error_recovery = 1;

        for (i = 0; i < ha->cdev.num_hwfns; i++) {
                qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
                qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
        }

        QL_DPRINT2(ha, "exit\n");

        return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
        int err, ret = 0;
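        /* A write of 1 to this sysctl invokes qlnx_trigger_dump(); any other value is a no-op. */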
qlnx_host_t *ha; 1430 1431 err = sysctl_handle_int(oidp, &ret, 0, req); 1432 1433 if (err || !req->newptr) 1434 return (err); 1435 1436 if (ret == 1) { 1437 ha = (qlnx_host_t *)arg1; 1438 qlnx_trigger_dump(ha); 1439 } 1440 return (err); 1441 } 1442 1443 static int 1444 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1445 { 1446 int err, i, ret = 0, usecs = 0; 1447 qlnx_host_t *ha; 1448 struct ecore_hwfn *p_hwfn; 1449 struct qlnx_fastpath *fp; 1450 1451 err = sysctl_handle_int(oidp, &usecs, 0, req); 1452 1453 if (err || !req->newptr || !usecs || (usecs > 255)) 1454 return (err); 1455 1456 ha = (qlnx_host_t *)arg1; 1457 1458 if (qlnx_vf_device(ha) == 0) 1459 return (-1); 1460 1461 for (i = 0; i < ha->num_rss; i++) { 1462 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1463 1464 fp = &ha->fp_array[i]; 1465 1466 if (fp->txq[0]->handle != NULL) { 1467 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1468 (uint16_t)usecs, fp->txq[0]->handle); 1469 } 1470 } 1471 1472 if (!ret) 1473 ha->tx_coalesce_usecs = (uint8_t)usecs; 1474 1475 return (err); 1476 } 1477 1478 static int 1479 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1480 { 1481 int err, i, ret = 0, usecs = 0; 1482 qlnx_host_t *ha; 1483 struct ecore_hwfn *p_hwfn; 1484 struct qlnx_fastpath *fp; 1485 1486 err = sysctl_handle_int(oidp, &usecs, 0, req); 1487 1488 if (err || !req->newptr || !usecs || (usecs > 255)) 1489 return (err); 1490 1491 ha = (qlnx_host_t *)arg1; 1492 1493 if (qlnx_vf_device(ha) == 0) 1494 return (-1); 1495 1496 for (i = 0; i < ha->num_rss; i++) { 1497 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1498 1499 fp = &ha->fp_array[i]; 1500 1501 if (fp->rxq->handle != NULL) { 1502 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1503 0, fp->rxq->handle); 1504 } 1505 } 1506 1507 if (!ret) 1508 ha->rx_coalesce_usecs = (uint8_t)usecs; 1509 1510 return (err); 1511 } 1512 1513 static void 1514 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1515 { 1516 struct sysctl_ctx_list *ctx; 1517 struct sysctl_oid_list *children; 1518 struct sysctl_oid *ctx_oid; 1519 1520 ctx = device_get_sysctl_ctx(ha->pci_dev); 1521 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1522 1523 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1524 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); 1525 children = SYSCTL_CHILDREN(ctx_oid); 1526 1527 SYSCTL_ADD_QUAD(ctx, children, 1528 OID_AUTO, "sp_interrupts", 1529 CTLFLAG_RD, &ha->sp_interrupts, 1530 "No. of slowpath interrupts"); 1531 1532 return; 1533 } 1534 1535 static void 1536 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1537 { 1538 struct sysctl_ctx_list *ctx; 1539 struct sysctl_oid_list *children; 1540 struct sysctl_oid_list *node_children; 1541 struct sysctl_oid *ctx_oid; 1542 int i, j; 1543 uint8_t name_str[16]; 1544 1545 ctx = device_get_sysctl_ctx(ha->pci_dev); 1546 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1547 1548 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1549 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); 1550 children = SYSCTL_CHILDREN(ctx_oid); 1551 1552 for (i = 0; i < ha->num_rss; i++) { 1553 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1554 snprintf(name_str, sizeof(name_str), "%d", i); 1555 1556 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1557 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); 1558 node_children = SYSCTL_CHILDREN(ctx_oid); 1559 1560 /* Tx Related */ 1561 1562 SYSCTL_ADD_QUAD(ctx, node_children, 1563 OID_AUTO, "tx_pkts_processed", 1564 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1565 "No. 
of packets processed for transmission");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_freed",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed,
                        "No. of freed packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_transmitted",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted,
                        "No. of transmitted packets");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_completed",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed,
                        "No. of transmit completions");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_non_tso_pkts",
                        CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts,
                        "No. of non LSO transmitted packets");

#ifdef QLNX_TRACE_PERF_DATA

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_trans_ctx",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx,
                        "No. of transmitted packets in transmit context");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_compl_ctx",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx,
                        "No. of transmit completions in transmit context");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_trans_fp",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp,
                        "No. of transmitted packets in taskqueue");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_compl_fp",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp,
                        "No. of transmit completions in taskqueue");

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_pkts_compl_intr",
                        CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr,
                        "No. of transmit completions in interrupt ctx");
#endif

                SYSCTL_ADD_QUAD(ctx, node_children,
                        OID_AUTO, "tx_tso_pkts",
                        CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts,
                        "No.
of LSO transmited packets"); 1619 1620 SYSCTL_ADD_QUAD(ctx, node_children, 1621 OID_AUTO, "tx_lso_wnd_min_len", 1622 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1623 "tx_lso_wnd_min_len"); 1624 1625 SYSCTL_ADD_QUAD(ctx, node_children, 1626 OID_AUTO, "tx_defrag", 1627 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1628 "tx_defrag"); 1629 1630 SYSCTL_ADD_QUAD(ctx, node_children, 1631 OID_AUTO, "tx_nsegs_gt_elem_left", 1632 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1633 "tx_nsegs_gt_elem_left"); 1634 1635 SYSCTL_ADD_UINT(ctx, node_children, 1636 OID_AUTO, "tx_tso_max_nsegs", 1637 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1638 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1639 1640 SYSCTL_ADD_UINT(ctx, node_children, 1641 OID_AUTO, "tx_tso_min_nsegs", 1642 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1643 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1644 1645 SYSCTL_ADD_UINT(ctx, node_children, 1646 OID_AUTO, "tx_tso_max_pkt_len", 1647 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1648 ha->fp_array[i].tx_tso_max_pkt_len, 1649 "tx_tso_max_pkt_len"); 1650 1651 SYSCTL_ADD_UINT(ctx, node_children, 1652 OID_AUTO, "tx_tso_min_pkt_len", 1653 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1654 ha->fp_array[i].tx_tso_min_pkt_len, 1655 "tx_tso_min_pkt_len"); 1656 1657 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1658 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1659 snprintf(name_str, sizeof(name_str), 1660 "tx_pkts_nseg_%02d", (j+1)); 1661 1662 SYSCTL_ADD_QUAD(ctx, node_children, 1663 OID_AUTO, name_str, CTLFLAG_RD, 1664 &ha->fp_array[i].tx_pkts[j], name_str); 1665 } 1666 1667 #ifdef QLNX_TRACE_PERF_DATA 1668 for (j = 0; j < 18; j++) { 1669 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1670 snprintf(name_str, sizeof(name_str), 1671 "tx_pkts_hist_%02d", (j+1)); 1672 1673 SYSCTL_ADD_QUAD(ctx, node_children, 1674 OID_AUTO, name_str, CTLFLAG_RD, 1675 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1676 } 1677 for (j = 0; j < 5; j++) { 1678 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1679 snprintf(name_str, sizeof(name_str), 1680 "tx_comInt_%02d", (j+1)); 1681 1682 SYSCTL_ADD_QUAD(ctx, node_children, 1683 OID_AUTO, name_str, CTLFLAG_RD, 1684 &ha->fp_array[i].tx_comInt[j], name_str); 1685 } 1686 for (j = 0; j < 18; j++) { 1687 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1688 snprintf(name_str, sizeof(name_str), 1689 "tx_pkts_q_%02d", (j+1)); 1690 1691 SYSCTL_ADD_QUAD(ctx, node_children, 1692 OID_AUTO, name_str, CTLFLAG_RD, 1693 &ha->fp_array[i].tx_pkts_q[j], name_str); 1694 } 1695 #endif 1696 1697 SYSCTL_ADD_QUAD(ctx, node_children, 1698 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1699 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1700 "err_tx_nsegs_gt_elem_left"); 1701 1702 SYSCTL_ADD_QUAD(ctx, node_children, 1703 OID_AUTO, "err_tx_dmamap_create", 1704 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1705 "err_tx_dmamap_create"); 1706 1707 SYSCTL_ADD_QUAD(ctx, node_children, 1708 OID_AUTO, "err_tx_defrag_dmamap_load", 1709 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1710 "err_tx_defrag_dmamap_load"); 1711 1712 SYSCTL_ADD_QUAD(ctx, node_children, 1713 OID_AUTO, "err_tx_non_tso_max_seg", 1714 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1715 "err_tx_non_tso_max_seg"); 1716 1717 SYSCTL_ADD_QUAD(ctx, node_children, 1718 OID_AUTO, "err_tx_dmamap_load", 1719 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1720 "err_tx_dmamap_load"); 1721 1722 SYSCTL_ADD_QUAD(ctx, node_children, 1723 OID_AUTO, "err_tx_defrag", 1724 
CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1725 "err_tx_defrag"); 1726 1727 SYSCTL_ADD_QUAD(ctx, node_children, 1728 OID_AUTO, "err_tx_free_pkt_null", 1729 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1730 "err_tx_free_pkt_null"); 1731 1732 SYSCTL_ADD_QUAD(ctx, node_children, 1733 OID_AUTO, "err_tx_cons_idx_conflict", 1734 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1735 "err_tx_cons_idx_conflict"); 1736 1737 SYSCTL_ADD_QUAD(ctx, node_children, 1738 OID_AUTO, "lro_cnt_64", 1739 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1740 "lro_cnt_64"); 1741 1742 SYSCTL_ADD_QUAD(ctx, node_children, 1743 OID_AUTO, "lro_cnt_128", 1744 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1745 "lro_cnt_128"); 1746 1747 SYSCTL_ADD_QUAD(ctx, node_children, 1748 OID_AUTO, "lro_cnt_256", 1749 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1750 "lro_cnt_256"); 1751 1752 SYSCTL_ADD_QUAD(ctx, node_children, 1753 OID_AUTO, "lro_cnt_512", 1754 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1755 "lro_cnt_512"); 1756 1757 SYSCTL_ADD_QUAD(ctx, node_children, 1758 OID_AUTO, "lro_cnt_1024", 1759 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1760 "lro_cnt_1024"); 1761 1762 /* Rx Related */ 1763 1764 SYSCTL_ADD_QUAD(ctx, node_children, 1765 OID_AUTO, "rx_pkts", 1766 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1767 "No. of received packets"); 1768 1769 SYSCTL_ADD_QUAD(ctx, node_children, 1770 OID_AUTO, "tpa_start", 1771 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1772 "No. of tpa_start packets"); 1773 1774 SYSCTL_ADD_QUAD(ctx, node_children, 1775 OID_AUTO, "tpa_cont", 1776 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1777 "No. of tpa_cont packets"); 1778 1779 SYSCTL_ADD_QUAD(ctx, node_children, 1780 OID_AUTO, "tpa_end", 1781 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1782 "No. of tpa_end packets"); 1783 1784 SYSCTL_ADD_QUAD(ctx, node_children, 1785 OID_AUTO, "err_m_getcl", 1786 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1787 "err_m_getcl"); 1788 1789 SYSCTL_ADD_QUAD(ctx, node_children, 1790 OID_AUTO, "err_m_getjcl", 1791 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1792 "err_m_getjcl"); 1793 1794 SYSCTL_ADD_QUAD(ctx, node_children, 1795 OID_AUTO, "err_rx_hw_errors", 1796 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1797 "err_rx_hw_errors"); 1798 1799 SYSCTL_ADD_QUAD(ctx, node_children, 1800 OID_AUTO, "err_rx_alloc_errors", 1801 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1802 "err_rx_alloc_errors"); 1803 } 1804 1805 return; 1806 } 1807 1808 static void 1809 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1810 { 1811 struct sysctl_ctx_list *ctx; 1812 struct sysctl_oid_list *children; 1813 struct sysctl_oid *ctx_oid; 1814 1815 ctx = device_get_sysctl_ctx(ha->pci_dev); 1816 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1817 1818 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1819 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); 1820 children = SYSCTL_CHILDREN(ctx_oid); 1821 1822 SYSCTL_ADD_QUAD(ctx, children, 1823 OID_AUTO, "no_buff_discards", 1824 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1825 "No. of packets discarded due to lack of buffer"); 1826 1827 SYSCTL_ADD_QUAD(ctx, children, 1828 OID_AUTO, "packet_too_big_discard", 1829 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1830 "No. 
of packets discarded because packet was too big"); 1831 1832 SYSCTL_ADD_QUAD(ctx, children, 1833 OID_AUTO, "ttl0_discard", 1834 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1835 "ttl0_discard"); 1836 1837 SYSCTL_ADD_QUAD(ctx, children, 1838 OID_AUTO, "rx_ucast_bytes", 1839 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1840 "rx_ucast_bytes"); 1841 1842 SYSCTL_ADD_QUAD(ctx, children, 1843 OID_AUTO, "rx_mcast_bytes", 1844 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1845 "rx_mcast_bytes"); 1846 1847 SYSCTL_ADD_QUAD(ctx, children, 1848 OID_AUTO, "rx_bcast_bytes", 1849 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1850 "rx_bcast_bytes"); 1851 1852 SYSCTL_ADD_QUAD(ctx, children, 1853 OID_AUTO, "rx_ucast_pkts", 1854 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1855 "rx_ucast_pkts"); 1856 1857 SYSCTL_ADD_QUAD(ctx, children, 1858 OID_AUTO, "rx_mcast_pkts", 1859 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1860 "rx_mcast_pkts"); 1861 1862 SYSCTL_ADD_QUAD(ctx, children, 1863 OID_AUTO, "rx_bcast_pkts", 1864 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1865 "rx_bcast_pkts"); 1866 1867 SYSCTL_ADD_QUAD(ctx, children, 1868 OID_AUTO, "mftag_filter_discards", 1869 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1870 "mftag_filter_discards"); 1871 1872 SYSCTL_ADD_QUAD(ctx, children, 1873 OID_AUTO, "mac_filter_discards", 1874 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1875 "mac_filter_discards"); 1876 1877 SYSCTL_ADD_QUAD(ctx, children, 1878 OID_AUTO, "tx_ucast_bytes", 1879 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1880 "tx_ucast_bytes"); 1881 1882 SYSCTL_ADD_QUAD(ctx, children, 1883 OID_AUTO, "tx_mcast_bytes", 1884 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1885 "tx_mcast_bytes"); 1886 1887 SYSCTL_ADD_QUAD(ctx, children, 1888 OID_AUTO, "tx_bcast_bytes", 1889 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1890 "tx_bcast_bytes"); 1891 1892 SYSCTL_ADD_QUAD(ctx, children, 1893 OID_AUTO, "tx_ucast_pkts", 1894 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1895 "tx_ucast_pkts"); 1896 1897 SYSCTL_ADD_QUAD(ctx, children, 1898 OID_AUTO, "tx_mcast_pkts", 1899 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1900 "tx_mcast_pkts"); 1901 1902 SYSCTL_ADD_QUAD(ctx, children, 1903 OID_AUTO, "tx_bcast_pkts", 1904 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1905 "tx_bcast_pkts"); 1906 1907 SYSCTL_ADD_QUAD(ctx, children, 1908 OID_AUTO, "tx_err_drop_pkts", 1909 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1910 "tx_err_drop_pkts"); 1911 1912 SYSCTL_ADD_QUAD(ctx, children, 1913 OID_AUTO, "tpa_coalesced_pkts", 1914 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1915 "tpa_coalesced_pkts"); 1916 1917 SYSCTL_ADD_QUAD(ctx, children, 1918 OID_AUTO, "tpa_coalesced_events", 1919 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1920 "tpa_coalesced_events"); 1921 1922 SYSCTL_ADD_QUAD(ctx, children, 1923 OID_AUTO, "tpa_aborts_num", 1924 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1925 "tpa_aborts_num"); 1926 1927 SYSCTL_ADD_QUAD(ctx, children, 1928 OID_AUTO, "tpa_not_coalesced_pkts", 1929 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1930 "tpa_not_coalesced_pkts"); 1931 1932 SYSCTL_ADD_QUAD(ctx, children, 1933 OID_AUTO, "tpa_coalesced_bytes", 1934 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1935 "tpa_coalesced_bytes"); 1936 1937 SYSCTL_ADD_QUAD(ctx, children, 1938 OID_AUTO, "rx_64_byte_packets", 1939 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1940 "rx_64_byte_packets"); 1941 1942 SYSCTL_ADD_QUAD(ctx, children, 1943 OID_AUTO, 
"rx_65_to_127_byte_packets", 1944 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1945 "rx_65_to_127_byte_packets"); 1946 1947 SYSCTL_ADD_QUAD(ctx, children, 1948 OID_AUTO, "rx_128_to_255_byte_packets", 1949 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1950 "rx_128_to_255_byte_packets"); 1951 1952 SYSCTL_ADD_QUAD(ctx, children, 1953 OID_AUTO, "rx_256_to_511_byte_packets", 1954 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1955 "rx_256_to_511_byte_packets"); 1956 1957 SYSCTL_ADD_QUAD(ctx, children, 1958 OID_AUTO, "rx_512_to_1023_byte_packets", 1959 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1960 "rx_512_to_1023_byte_packets"); 1961 1962 SYSCTL_ADD_QUAD(ctx, children, 1963 OID_AUTO, "rx_1024_to_1518_byte_packets", 1964 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1965 "rx_1024_to_1518_byte_packets"); 1966 1967 SYSCTL_ADD_QUAD(ctx, children, 1968 OID_AUTO, "rx_1519_to_1522_byte_packets", 1969 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1970 "rx_1519_to_1522_byte_packets"); 1971 1972 SYSCTL_ADD_QUAD(ctx, children, 1973 OID_AUTO, "rx_1523_to_2047_byte_packets", 1974 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1975 "rx_1523_to_2047_byte_packets"); 1976 1977 SYSCTL_ADD_QUAD(ctx, children, 1978 OID_AUTO, "rx_2048_to_4095_byte_packets", 1979 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1980 "rx_2048_to_4095_byte_packets"); 1981 1982 SYSCTL_ADD_QUAD(ctx, children, 1983 OID_AUTO, "rx_4096_to_9216_byte_packets", 1984 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1985 "rx_4096_to_9216_byte_packets"); 1986 1987 SYSCTL_ADD_QUAD(ctx, children, 1988 OID_AUTO, "rx_9217_to_16383_byte_packets", 1989 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1990 "rx_9217_to_16383_byte_packets"); 1991 1992 SYSCTL_ADD_QUAD(ctx, children, 1993 OID_AUTO, "rx_crc_errors", 1994 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1995 "rx_crc_errors"); 1996 1997 SYSCTL_ADD_QUAD(ctx, children, 1998 OID_AUTO, "rx_mac_crtl_frames", 1999 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 2000 "rx_mac_crtl_frames"); 2001 2002 SYSCTL_ADD_QUAD(ctx, children, 2003 OID_AUTO, "rx_pause_frames", 2004 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 2005 "rx_pause_frames"); 2006 2007 SYSCTL_ADD_QUAD(ctx, children, 2008 OID_AUTO, "rx_pfc_frames", 2009 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 2010 "rx_pfc_frames"); 2011 2012 SYSCTL_ADD_QUAD(ctx, children, 2013 OID_AUTO, "rx_align_errors", 2014 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 2015 "rx_align_errors"); 2016 2017 SYSCTL_ADD_QUAD(ctx, children, 2018 OID_AUTO, "rx_carrier_errors", 2019 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 2020 "rx_carrier_errors"); 2021 2022 SYSCTL_ADD_QUAD(ctx, children, 2023 OID_AUTO, "rx_oversize_packets", 2024 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2025 "rx_oversize_packets"); 2026 2027 SYSCTL_ADD_QUAD(ctx, children, 2028 OID_AUTO, "rx_jabbers", 2029 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2030 "rx_jabbers"); 2031 2032 SYSCTL_ADD_QUAD(ctx, children, 2033 OID_AUTO, "rx_undersize_packets", 2034 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2035 "rx_undersize_packets"); 2036 2037 SYSCTL_ADD_QUAD(ctx, children, 2038 OID_AUTO, "rx_fragments", 2039 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2040 "rx_fragments"); 2041 2042 SYSCTL_ADD_QUAD(ctx, children, 2043 OID_AUTO, "tx_64_byte_packets", 2044 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2045 
"tx_64_byte_packets"); 2046 2047 SYSCTL_ADD_QUAD(ctx, children, 2048 OID_AUTO, "tx_65_to_127_byte_packets", 2049 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2050 "tx_65_to_127_byte_packets"); 2051 2052 SYSCTL_ADD_QUAD(ctx, children, 2053 OID_AUTO, "tx_128_to_255_byte_packets", 2054 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2055 "tx_128_to_255_byte_packets"); 2056 2057 SYSCTL_ADD_QUAD(ctx, children, 2058 OID_AUTO, "tx_256_to_511_byte_packets", 2059 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2060 "tx_256_to_511_byte_packets"); 2061 2062 SYSCTL_ADD_QUAD(ctx, children, 2063 OID_AUTO, "tx_512_to_1023_byte_packets", 2064 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2065 "tx_512_to_1023_byte_packets"); 2066 2067 SYSCTL_ADD_QUAD(ctx, children, 2068 OID_AUTO, "tx_1024_to_1518_byte_packets", 2069 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2070 "tx_1024_to_1518_byte_packets"); 2071 2072 SYSCTL_ADD_QUAD(ctx, children, 2073 OID_AUTO, "tx_1519_to_2047_byte_packets", 2074 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2075 "tx_1519_to_2047_byte_packets"); 2076 2077 SYSCTL_ADD_QUAD(ctx, children, 2078 OID_AUTO, "tx_2048_to_4095_byte_packets", 2079 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2080 "tx_2048_to_4095_byte_packets"); 2081 2082 SYSCTL_ADD_QUAD(ctx, children, 2083 OID_AUTO, "tx_4096_to_9216_byte_packets", 2084 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2085 "tx_4096_to_9216_byte_packets"); 2086 2087 SYSCTL_ADD_QUAD(ctx, children, 2088 OID_AUTO, "tx_9217_to_16383_byte_packets", 2089 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2090 "tx_9217_to_16383_byte_packets"); 2091 2092 SYSCTL_ADD_QUAD(ctx, children, 2093 OID_AUTO, "tx_pause_frames", 2094 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2095 "tx_pause_frames"); 2096 2097 SYSCTL_ADD_QUAD(ctx, children, 2098 OID_AUTO, "tx_pfc_frames", 2099 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2100 "tx_pfc_frames"); 2101 2102 SYSCTL_ADD_QUAD(ctx, children, 2103 OID_AUTO, "tx_lpi_entry_count", 2104 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2105 "tx_lpi_entry_count"); 2106 2107 SYSCTL_ADD_QUAD(ctx, children, 2108 OID_AUTO, "tx_total_collisions", 2109 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2110 "tx_total_collisions"); 2111 2112 SYSCTL_ADD_QUAD(ctx, children, 2113 OID_AUTO, "brb_truncates", 2114 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2115 "brb_truncates"); 2116 2117 SYSCTL_ADD_QUAD(ctx, children, 2118 OID_AUTO, "brb_discards", 2119 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2120 "brb_discards"); 2121 2122 SYSCTL_ADD_QUAD(ctx, children, 2123 OID_AUTO, "rx_mac_bytes", 2124 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2125 "rx_mac_bytes"); 2126 2127 SYSCTL_ADD_QUAD(ctx, children, 2128 OID_AUTO, "rx_mac_uc_packets", 2129 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2130 "rx_mac_uc_packets"); 2131 2132 SYSCTL_ADD_QUAD(ctx, children, 2133 OID_AUTO, "rx_mac_mc_packets", 2134 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2135 "rx_mac_mc_packets"); 2136 2137 SYSCTL_ADD_QUAD(ctx, children, 2138 OID_AUTO, "rx_mac_bc_packets", 2139 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2140 "rx_mac_bc_packets"); 2141 2142 SYSCTL_ADD_QUAD(ctx, children, 2143 OID_AUTO, "rx_mac_frames_ok", 2144 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2145 "rx_mac_frames_ok"); 2146 2147 SYSCTL_ADD_QUAD(ctx, children, 2148 OID_AUTO, "tx_mac_bytes", 2149 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2150 "tx_mac_bytes"); 2151 2152 SYSCTL_ADD_QUAD(ctx, children, 2153 OID_AUTO, "tx_mac_uc_packets", 2154 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2155 "tx_mac_uc_packets"); 2156 2157 SYSCTL_ADD_QUAD(ctx, children, 2158 OID_AUTO, "tx_mac_mc_packets", 2159 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2160 "tx_mac_mc_packets"); 2161 2162 SYSCTL_ADD_QUAD(ctx, children, 2163 OID_AUTO, "tx_mac_bc_packets", 2164 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2165 "tx_mac_bc_packets"); 2166 2167 SYSCTL_ADD_QUAD(ctx, children, 2168 OID_AUTO, "tx_mac_ctrl_frames", 2169 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2170 "tx_mac_ctrl_frames"); 2171 return; 2172 } 2173 2174 static void 2175 qlnx_add_sysctls(qlnx_host_t *ha) 2176 { 2177 device_t dev = ha->pci_dev; 2178 struct sysctl_ctx_list *ctx; 2179 struct sysctl_oid_list *children; 2180 2181 ctx = device_get_sysctl_ctx(dev); 2182 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2183 2184 qlnx_add_fp_stats_sysctls(ha); 2185 qlnx_add_sp_stats_sysctls(ha); 2186 2187 if (qlnx_vf_device(ha) != 0) 2188 qlnx_add_hw_stats_sysctls(ha); 2189 2190 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2191 CTLFLAG_RD, qlnx_ver_str, 0, 2192 "Driver Version"); 2193 2194 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2195 CTLFLAG_RD, ha->stormfw_ver, 0, 2196 "STORM Firmware Version"); 2197 2198 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2199 CTLFLAG_RD, ha->mfw_ver, 0, 2200 "Management Firmware Version"); 2201 2202 SYSCTL_ADD_UINT(ctx, children, 2203 OID_AUTO, "personality", CTLFLAG_RD, 2204 &ha->personality, ha->personality, 2205 "\tpersonality = 0 => Ethernet Only\n" 2206 "\tpersonality = 3 => Ethernet and RoCE\n" 2207 "\tpersonality = 4 => Ethernet and iWARP\n" 2208 "\tpersonality = 6 => Default in Shared Memory\n"); 2209 2210 ha->dbg_level = 0; 2211 SYSCTL_ADD_UINT(ctx, children, 2212 OID_AUTO, "debug", CTLFLAG_RW, 2213 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2214 2215 ha->dp_level = 0x01; 2216 SYSCTL_ADD_UINT(ctx, children, 2217 OID_AUTO, "dp_level", CTLFLAG_RW, 2218 &ha->dp_level, ha->dp_level, "DP Level"); 2219 2220 ha->dbg_trace_lro_cnt = 0; 2221 SYSCTL_ADD_UINT(ctx, children, 2222 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2223 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2224 "Trace LRO Counts"); 2225 2226 ha->dbg_trace_tso_pkt_len = 0; 2227 SYSCTL_ADD_UINT(ctx, children, 2228 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2229 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2230 "Trace TSO packet lengths"); 2231 2232 ha->dp_module = 0; 2233 SYSCTL_ADD_UINT(ctx, children, 2234 OID_AUTO, "dp_module", CTLFLAG_RW, 2235 &ha->dp_module, ha->dp_module, "DP Module"); 2236 2237 ha->err_inject = 0; 2238 2239 SYSCTL_ADD_UINT(ctx, children, 2240 OID_AUTO, "err_inject", CTLFLAG_RW, 2241 &ha->err_inject, ha->err_inject, "Error Inject"); 2242 2243 ha->storm_stats_enable = 0; 2244 2245 SYSCTL_ADD_UINT(ctx, children, 2246 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2247 &ha->storm_stats_enable, ha->storm_stats_enable, 2248 "Enable Storm Statistics Gathering"); 2249 2250 ha->storm_stats_index = 0; 2251 2252 SYSCTL_ADD_UINT(ctx, children, 2253 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2254 &ha->storm_stats_index, ha->storm_stats_index, 2255 "Enable Storm Statistics Gathering Current Index"); 2256 2257 ha->grcdump_taken = 0; 2258 SYSCTL_ADD_UINT(ctx, children, 2259 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2260 &ha->grcdump_taken, ha->grcdump_taken, 
2261 "grcdump_taken"); 2262 2263 ha->idle_chk_taken = 0; 2264 SYSCTL_ADD_UINT(ctx, children, 2265 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2266 &ha->idle_chk_taken, ha->idle_chk_taken, 2267 "idle_chk_taken"); 2268 2269 SYSCTL_ADD_UINT(ctx, children, 2270 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2271 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2272 "rx_coalesce_usecs"); 2273 2274 SYSCTL_ADD_UINT(ctx, children, 2275 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2276 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2277 "tx_coalesce_usecs"); 2278 2279 SYSCTL_ADD_PROC(ctx, children, 2280 OID_AUTO, "trigger_dump", 2281 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2282 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2283 2284 SYSCTL_ADD_PROC(ctx, children, 2285 OID_AUTO, "set_rx_coalesce_usecs", 2286 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2287 (void *)ha, 0, qlnx_set_rx_coalesce, "I", 2288 "rx interrupt coalesce period microseconds"); 2289 2290 SYSCTL_ADD_PROC(ctx, children, 2291 OID_AUTO, "set_tx_coalesce_usecs", 2292 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2293 (void *)ha, 0, qlnx_set_tx_coalesce, "I", 2294 "tx interrupt coalesce period microseconds"); 2295 2296 ha->rx_pkt_threshold = 128; 2297 SYSCTL_ADD_UINT(ctx, children, 2298 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2299 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2300 "No. of Rx Pkts to process at a time"); 2301 2302 ha->rx_jumbo_buf_eq_mtu = 0; 2303 SYSCTL_ADD_UINT(ctx, children, 2304 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2305 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2306 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2307 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2308 2309 SYSCTL_ADD_QUAD(ctx, children, 2310 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2311 &ha->err_illegal_intr, "err_illegal_intr"); 2312 2313 SYSCTL_ADD_QUAD(ctx, children, 2314 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2315 &ha->err_fp_null, "err_fp_null"); 2316 2317 SYSCTL_ADD_QUAD(ctx, children, 2318 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2319 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2320 return; 2321 } 2322 2323 /***************************************************************************** 2324 * Operating System Network Interface Functions 2325 *****************************************************************************/ 2326 2327 static void 2328 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2329 { 2330 uint16_t device_id; 2331 struct ifnet *ifp; 2332 2333 ifp = ha->ifp = if_alloc(IFT_ETHER); 2334 2335 if (ifp == NULL) 2336 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2337 2338 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2339 2340 device_id = pci_get_device(ha->pci_dev); 2341 2342 #if __FreeBSD_version >= 1000000 2343 2344 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2345 ifp->if_baudrate = IF_Gbps(40); 2346 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2347 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2348 ifp->if_baudrate = IF_Gbps(25); 2349 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2350 ifp->if_baudrate = IF_Gbps(50); 2351 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2352 ifp->if_baudrate = IF_Gbps(100); 2353 2354 ifp->if_capabilities = IFCAP_LINKSTATE; 2355 #else 2356 ifp->if_mtu = ETHERMTU; 2357 ifp->if_baudrate = (1 * 1000 * 1000 *1000); 2358 2359 #endif /* #if __FreeBSD_version >= 1000000 */ 2360 2361 ifp->if_init = qlnx_init; 2362 ifp->if_softc = ha; 2363 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 
2364 ifp->if_ioctl = qlnx_ioctl; 2365 ifp->if_transmit = qlnx_transmit; 2366 ifp->if_qflush = qlnx_qflush; 2367 2368 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 2369 ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 2370 IFQ_SET_READY(&ifp->if_snd); 2371 2372 #if __FreeBSD_version >= 1100036 2373 if_setgetcounterfn(ifp, qlnx_get_counter); 2374 #endif 2375 2376 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2377 2378 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2379 2380 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2381 !ha->primary_mac[2] && !ha->primary_mac[3] && 2382 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2383 uint32_t rnd; 2384 2385 rnd = arc4random(); 2386 2387 ha->primary_mac[0] = 0x00; 2388 ha->primary_mac[1] = 0x0e; 2389 ha->primary_mac[2] = 0x1e; 2390 ha->primary_mac[3] = rnd & 0xFF; 2391 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2392 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2393 } 2394 2395 ether_ifattach(ifp, ha->primary_mac); 2396 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2397 2398 ifp->if_capabilities = IFCAP_HWCSUM; 2399 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2400 2401 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2402 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 2403 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2404 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2405 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 2406 ifp->if_capabilities |= IFCAP_TSO4; 2407 ifp->if_capabilities |= IFCAP_TSO6; 2408 ifp->if_capabilities |= IFCAP_LRO; 2409 2410 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE - 2411 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2412 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */; 2413 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE; 2414 2415 ifp->if_capenable = ifp->if_capabilities; 2416 2417 ifp->if_hwassist = CSUM_IP; 2418 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 2419 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 2420 ifp->if_hwassist |= CSUM_TSO; 2421 2422 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2423 2424 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2425 qlnx_media_status); 2426 2427 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2428 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2429 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2430 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2431 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2432 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2433 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2434 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2435 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2436 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2437 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2438 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2439 ifmedia_add(&ha->media, 2440 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2441 ifmedia_add(&ha->media, 2442 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2443 ifmedia_add(&ha->media, 2444 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2445 } 2446 2447 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2448 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2449 2450 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2451 2452 QL_DPRINT2(ha, "exit\n"); 2453 2454 return; 2455 } 2456 2457 static void 2458 qlnx_init_locked(qlnx_host_t *ha) 2459 { 2460 struct ifnet *ifp = ha->ifp; 2461 2462 QL_DPRINT1(ha, "Driver Initialization start \n"); 2463 2464 
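/* (Re)initialization is a full stop followed by a fresh load; the interface is marked IFF_DRV_RUNNING only if qlnx_load() succeeds. */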
qlnx_stop(ha); 2465 2466 if (qlnx_load(ha) == 0) { 2467 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2468 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2469 2470 #ifdef QLNX_ENABLE_IWARP 2471 if (qlnx_vf_device(ha) != 0) { 2472 qlnx_rdma_dev_open(ha); 2473 } 2474 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2475 } 2476 2477 return; 2478 } 2479 2480 static void 2481 qlnx_init(void *arg) 2482 { 2483 qlnx_host_t *ha; 2484 2485 ha = (qlnx_host_t *)arg; 2486 2487 QL_DPRINT2(ha, "enter\n"); 2488 2489 QLNX_LOCK(ha); 2490 qlnx_init_locked(ha); 2491 QLNX_UNLOCK(ha); 2492 2493 QL_DPRINT2(ha, "exit\n"); 2494 2495 return; 2496 } 2497 2498 static int 2499 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2500 { 2501 struct ecore_filter_mcast *mcast; 2502 struct ecore_dev *cdev; 2503 int rc; 2504 2505 cdev = &ha->cdev; 2506 2507 mcast = &ha->ecore_mcast; 2508 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2509 2510 if (add_mac) 2511 mcast->opcode = ECORE_FILTER_ADD; 2512 else 2513 mcast->opcode = ECORE_FILTER_REMOVE; 2514 2515 mcast->num_mc_addrs = 1; 2516 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2517 2518 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2519 2520 return (rc); 2521 } 2522 2523 static int 2524 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2525 { 2526 int i; 2527 2528 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2529 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2530 return 0; /* its been already added */ 2531 } 2532 2533 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2534 if ((ha->mcast[i].addr[0] == 0) && 2535 (ha->mcast[i].addr[1] == 0) && 2536 (ha->mcast[i].addr[2] == 0) && 2537 (ha->mcast[i].addr[3] == 0) && 2538 (ha->mcast[i].addr[4] == 0) && 2539 (ha->mcast[i].addr[5] == 0)) { 2540 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2541 return (-1); 2542 2543 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2544 ha->nmcast++; 2545 2546 return 0; 2547 } 2548 } 2549 return 0; 2550 } 2551 2552 static int 2553 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2554 { 2555 int i; 2556 2557 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2558 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2559 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2560 return (-1); 2561 2562 ha->mcast[i].addr[0] = 0; 2563 ha->mcast[i].addr[1] = 0; 2564 ha->mcast[i].addr[2] = 0; 2565 ha->mcast[i].addr[3] = 0; 2566 ha->mcast[i].addr[4] = 0; 2567 ha->mcast[i].addr[5] = 0; 2568 2569 ha->nmcast--; 2570 2571 return 0; 2572 } 2573 } 2574 return 0; 2575 } 2576 2577 /* 2578 * Name: qls_hw_set_multi 2579 * Function: Sets the Multicast Addresses provided the host O.S into the 2580 * hardware (for the given interface) 2581 */ 2582 static void 2583 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2584 uint32_t add_mac) 2585 { 2586 int i; 2587 2588 for (i = 0; i < mcnt; i++) { 2589 if (add_mac) { 2590 if (qlnx_hw_add_mcast(ha, mta)) 2591 break; 2592 } else { 2593 if (qlnx_hw_del_mcast(ha, mta)) 2594 break; 2595 } 2596 2597 mta += ETHER_HDR_LEN; 2598 } 2599 return; 2600 } 2601 2602 static u_int 2603 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 2604 { 2605 uint8_t *mta = arg; 2606 2607 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2608 return (0); 2609 2610 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2611 2612 return (1); 2613 } 2614 2615 static int 2616 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2617 { 2618 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; 2619 struct ifnet *ifp = ha->ifp; 2620 u_int mcnt; 2621 2622 if 
(qlnx_vf_device(ha) == 0) 2623 return (0); 2624 2625 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); 2626 2627 QLNX_LOCK(ha); 2628 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2629 QLNX_UNLOCK(ha); 2630 2631 return (0); 2632 } 2633 2634 static int 2635 qlnx_set_promisc(qlnx_host_t *ha) 2636 { 2637 int rc = 0; 2638 uint8_t filter; 2639 2640 if (qlnx_vf_device(ha) == 0) 2641 return (0); 2642 2643 filter = ha->filter; 2644 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2645 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2646 2647 rc = qlnx_set_rx_accept_filter(ha, filter); 2648 return (rc); 2649 } 2650 2651 static int 2652 qlnx_set_allmulti(qlnx_host_t *ha) 2653 { 2654 int rc = 0; 2655 uint8_t filter; 2656 2657 if (qlnx_vf_device(ha) == 0) 2658 return (0); 2659 2660 filter = ha->filter; 2661 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2662 rc = qlnx_set_rx_accept_filter(ha, filter); 2663 2664 return (rc); 2665 } 2666 2667 static int 2668 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2669 { 2670 int ret = 0, mask; 2671 struct ifreq *ifr = (struct ifreq *)data; 2672 struct ifaddr *ifa = (struct ifaddr *)data; 2673 qlnx_host_t *ha; 2674 2675 ha = (qlnx_host_t *)ifp->if_softc; 2676 2677 switch (cmd) { 2678 case SIOCSIFADDR: 2679 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2680 2681 if (ifa->ifa_addr->sa_family == AF_INET) { 2682 ifp->if_flags |= IFF_UP; 2683 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2684 QLNX_LOCK(ha); 2685 qlnx_init_locked(ha); 2686 QLNX_UNLOCK(ha); 2687 } 2688 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2689 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2690 2691 arp_ifinit(ifp, ifa); 2692 } else { 2693 ether_ioctl(ifp, cmd, data); 2694 } 2695 break; 2696 2697 case SIOCSIFMTU: 2698 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2699 2700 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2701 ret = EINVAL; 2702 } else { 2703 QLNX_LOCK(ha); 2704 ifp->if_mtu = ifr->ifr_mtu; 2705 ha->max_frame_size = 2706 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2707 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2708 qlnx_init_locked(ha); 2709 } 2710 2711 QLNX_UNLOCK(ha); 2712 } 2713 2714 break; 2715 2716 case SIOCSIFFLAGS: 2717 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2718 2719 QLNX_LOCK(ha); 2720 2721 if (ifp->if_flags & IFF_UP) { 2722 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2723 if ((ifp->if_flags ^ ha->if_flags) & 2724 IFF_PROMISC) { 2725 ret = qlnx_set_promisc(ha); 2726 } else if ((ifp->if_flags ^ ha->if_flags) & 2727 IFF_ALLMULTI) { 2728 ret = qlnx_set_allmulti(ha); 2729 } 2730 } else { 2731 ha->max_frame_size = ifp->if_mtu + 2732 ETHER_HDR_LEN + ETHER_CRC_LEN; 2733 qlnx_init_locked(ha); 2734 } 2735 } else { 2736 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2737 qlnx_stop(ha); 2738 ha->if_flags = ifp->if_flags; 2739 } 2740 2741 QLNX_UNLOCK(ha); 2742 break; 2743 2744 case SIOCADDMULTI: 2745 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2746 2747 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2748 if (qlnx_set_multi(ha, 1)) 2749 ret = EINVAL; 2750 } 2751 break; 2752 2753 case SIOCDELMULTI: 2754 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2755 2756 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2757 if (qlnx_set_multi(ha, 0)) 2758 ret = EINVAL; 2759 } 2760 break; 2761 2762 case SIOCSIFMEDIA: 2763 case SIOCGIFMEDIA: 2764 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2765 2766 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2767 break; 2768 2769 case SIOCSIFCAP: 2770 2771 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2772 2773 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2774 
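/* 'mask' holds the capability bits that differ from the current settings; toggle only those bits, then re-initialize the interface if it is running so the changes take effect. */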
2775 if (mask & IFCAP_HWCSUM) 2776 ifp->if_capenable ^= IFCAP_HWCSUM; 2777 if (mask & IFCAP_TSO4) 2778 ifp->if_capenable ^= IFCAP_TSO4; 2779 if (mask & IFCAP_TSO6) 2780 ifp->if_capenable ^= IFCAP_TSO6; 2781 if (mask & IFCAP_VLAN_HWTAGGING) 2782 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2783 if (mask & IFCAP_VLAN_HWTSO) 2784 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2785 if (mask & IFCAP_LRO) 2786 ifp->if_capenable ^= IFCAP_LRO; 2787 2788 QLNX_LOCK(ha); 2789 2790 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2791 qlnx_init_locked(ha); 2792 2793 QLNX_UNLOCK(ha); 2794 2795 VLAN_CAPABILITIES(ifp); 2796 break; 2797 2798 #if (__FreeBSD_version >= 1100101) 2799 2800 case SIOCGI2C: 2801 { 2802 struct ifi2creq i2c; 2803 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2804 struct ecore_ptt *p_ptt; 2805 2806 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2807 2808 if (ret) 2809 break; 2810 2811 if ((i2c.len > sizeof (i2c.data)) || 2812 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2813 ret = EINVAL; 2814 break; 2815 } 2816 2817 p_ptt = ecore_ptt_acquire(p_hwfn); 2818 2819 if (!p_ptt) { 2820 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2821 ret = -1; 2822 break; 2823 } 2824 2825 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2826 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2827 i2c.len, &i2c.data[0]); 2828 2829 ecore_ptt_release(p_hwfn, p_ptt); 2830 2831 if (ret) { 2832 ret = -1; 2833 break; 2834 } 2835 2836 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2837 2838 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2839 len = %d addr = 0x%02x offset = 0x%04x \ 2840 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2841 0x%02x 0x%02x 0x%02x\n", 2842 ret, i2c.len, i2c.dev_addr, i2c.offset, 2843 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2844 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2845 break; 2846 } 2847 #endif /* #if (__FreeBSD_version >= 1100101) */ 2848 2849 default: 2850 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2851 ret = ether_ioctl(ifp, cmd, data); 2852 break; 2853 } 2854 2855 return (ret); 2856 } 2857 2858 static int 2859 qlnx_media_change(struct ifnet *ifp) 2860 { 2861 qlnx_host_t *ha; 2862 struct ifmedia *ifm; 2863 int ret = 0; 2864 2865 ha = (qlnx_host_t *)ifp->if_softc; 2866 2867 QL_DPRINT2(ha, "enter\n"); 2868 2869 ifm = &ha->media; 2870 2871 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2872 ret = EINVAL; 2873 2874 QL_DPRINT2(ha, "exit\n"); 2875 2876 return (ret); 2877 } 2878 2879 static void 2880 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2881 { 2882 qlnx_host_t *ha; 2883 2884 ha = (qlnx_host_t *)ifp->if_softc; 2885 2886 QL_DPRINT2(ha, "enter\n"); 2887 2888 ifmr->ifm_status = IFM_AVALID; 2889 ifmr->ifm_active = IFM_ETHER; 2890 2891 if (ha->link_up) { 2892 ifmr->ifm_status |= IFM_ACTIVE; 2893 ifmr->ifm_active |= 2894 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2895 2896 if (ha->if_link.link_partner_caps & 2897 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2898 ifmr->ifm_active |= 2899 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2900 } 2901 2902 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2903 2904 return; 2905 } 2906 2907 static void 2908 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2909 struct qlnx_tx_queue *txq) 2910 { 2911 u16 idx; 2912 struct mbuf *mp; 2913 bus_dmamap_t map; 2914 int i; 2915 struct eth_tx_bd *tx_data_bd; 2916 struct eth_tx_1st_bd *first_bd; 2917 int nbds = 0; 2918 2919 idx = txq->sw_tx_cons; 2920 mp = txq->sw_tx_ring[idx].mp; 2921 map = txq->sw_tx_ring[idx].map; 2922 2923 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2924 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2925 2926 QL_DPRINT1(ha, "(mp == NULL) " 2927 " tx_idx = 0x%x" 2928 " ecore_prod_idx = 0x%x" 2929 " ecore_cons_idx = 0x%x" 2930 " hw_bd_cons = 0x%x" 2931 " txq_db_last = 0x%x" 2932 " elem_left = 0x%x\n", 2933 fp->rss_id, 2934 ecore_chain_get_prod_idx(&txq->tx_pbl), 2935 ecore_chain_get_cons_idx(&txq->tx_pbl), 2936 le16toh(*txq->hw_cons_ptr), 2937 txq->tx_db.raw, 2938 ecore_chain_get_elem_left(&txq->tx_pbl)); 2939 2940 fp->err_tx_free_pkt_null++; 2941 2942 //DEBUG 2943 qlnx_trigger_dump(ha); 2944 2945 return; 2946 } else { 2947 QLNX_INC_OPACKETS((ha->ifp)); 2948 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2949 2950 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2951 bus_dmamap_unload(ha->tx_tag, map); 2952 2953 fp->tx_pkts_freed++; 2954 fp->tx_pkts_completed++; 2955 2956 m_freem(mp); 2957 } 2958 2959 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2960 nbds = first_bd->data.nbds; 2961 2962 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2963 2964 for (i = 1; i < nbds; i++) { 2965 tx_data_bd = ecore_chain_consume(&txq->tx_pbl); 2966 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2967 } 2968 txq->sw_tx_ring[idx].flags = 0; 2969 txq->sw_tx_ring[idx].mp = NULL; 2970 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2971 2972 return; 2973 } 2974 2975 static void 2976 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2977 struct qlnx_tx_queue *txq) 2978 { 2979 u16 hw_bd_cons; 2980 u16 ecore_cons_idx; 2981 uint16_t diff; 2982 uint16_t idx, idx2; 2983 2984 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2985 2986 while (hw_bd_cons != 2987 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2988 if (hw_bd_cons < ecore_cons_idx) { 2989 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2990 } else { 2991 diff = hw_bd_cons - ecore_cons_idx; 2992 } 2993 if ((diff > TX_RING_SIZE) || 2994 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2995 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2996 2997 QL_DPRINT1(ha, "(diff = 0x%x) " 2998 " tx_idx = 0x%x" 2999 " ecore_prod_idx = 0x%x" 3000 " ecore_cons_idx = 0x%x" 3001 " hw_bd_cons = 0x%x" 3002 " txq_db_last = 0x%x" 3003 " elem_left = 0x%x\n", 3004 diff, 3005 fp->rss_id, 3006 ecore_chain_get_prod_idx(&txq->tx_pbl), 3007 ecore_chain_get_cons_idx(&txq->tx_pbl), 3008 le16toh(*txq->hw_cons_ptr), 3009 txq->tx_db.raw, 3010 ecore_chain_get_elem_left(&txq->tx_pbl)); 3011 3012 fp->err_tx_cons_idx_conflict++; 3013 3014 //DEBUG 3015 qlnx_trigger_dump(ha); 3016 } 3017 3018 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3019 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 3020 prefetch(txq->sw_tx_ring[idx].mp); 3021 prefetch(txq->sw_tx_ring[idx2].mp); 3022 3023 qlnx_free_tx_pkt(ha, fp, txq); 3024 3025 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 3026 } 3027 return; 3028 } 3029 3030 static int 3031 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp) 3032 { 3033 int ret = 0; 3034 struct qlnx_tx_queue *txq; 3035 
qlnx_host_t * ha; 3036 uint16_t elem_left; 3037 3038 txq = fp->txq[0]; 3039 ha = (qlnx_host_t *)fp->edev; 3040 3041 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3042 if(mp != NULL) 3043 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3044 return (ret); 3045 } 3046 3047 if(mp != NULL) 3048 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3049 3050 mp = drbr_peek(ifp, fp->tx_br); 3051 3052 while (mp != NULL) { 3053 if (qlnx_send(ha, fp, &mp)) { 3054 if (mp != NULL) { 3055 drbr_putback(ifp, fp->tx_br, mp); 3056 } else { 3057 fp->tx_pkts_processed++; 3058 drbr_advance(ifp, fp->tx_br); 3059 } 3060 goto qlnx_transmit_locked_exit; 3061 3062 } else { 3063 drbr_advance(ifp, fp->tx_br); 3064 fp->tx_pkts_transmitted++; 3065 fp->tx_pkts_processed++; 3066 } 3067 3068 mp = drbr_peek(ifp, fp->tx_br); 3069 } 3070 3071 qlnx_transmit_locked_exit: 3072 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3073 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3074 < QLNX_TX_ELEM_MAX_THRESH)) 3075 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3076 3077 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3078 return ret; 3079 } 3080 3081 static int 3082 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 3083 { 3084 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 3085 struct qlnx_fastpath *fp; 3086 int rss_id = 0, ret = 0; 3087 3088 #ifdef QLNX_TRACEPERF_DATA 3089 uint64_t tx_pkts = 0, tx_compl = 0; 3090 #endif 3091 3092 QL_DPRINT2(ha, "enter\n"); 3093 3094 #if __FreeBSD_version >= 1100000 3095 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3096 #else 3097 if (mp->m_flags & M_FLOWID) 3098 #endif 3099 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3100 ha->num_rss; 3101 3102 fp = &ha->fp_array[rss_id]; 3103 3104 if (fp->tx_br == NULL) { 3105 ret = EINVAL; 3106 goto qlnx_transmit_exit; 3107 } 3108 3109 if (mtx_trylock(&fp->tx_mtx)) { 3110 #ifdef QLNX_TRACEPERF_DATA 3111 tx_pkts = fp->tx_pkts_transmitted; 3112 tx_compl = fp->tx_pkts_completed; 3113 #endif 3114 3115 ret = qlnx_transmit_locked(ifp, fp, mp); 3116 3117 #ifdef QLNX_TRACEPERF_DATA 3118 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3119 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3120 #endif 3121 mtx_unlock(&fp->tx_mtx); 3122 } else { 3123 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3124 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3125 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3126 } 3127 } 3128 3129 qlnx_transmit_exit: 3130 3131 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3132 return ret; 3133 } 3134 3135 static void 3136 qlnx_qflush(struct ifnet *ifp) 3137 { 3138 int rss_id; 3139 struct qlnx_fastpath *fp; 3140 struct mbuf *mp; 3141 qlnx_host_t *ha; 3142 3143 ha = (qlnx_host_t *)ifp->if_softc; 3144 3145 QL_DPRINT2(ha, "enter\n"); 3146 3147 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3148 fp = &ha->fp_array[rss_id]; 3149 3150 if (fp == NULL) 3151 continue; 3152 3153 if (fp->tx_br) { 3154 mtx_lock(&fp->tx_mtx); 3155 3156 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3157 fp->tx_pkts_freed++; 3158 m_freem(mp); 3159 } 3160 mtx_unlock(&fp->tx_mtx); 3161 } 3162 } 3163 QL_DPRINT2(ha, "exit\n"); 3164 3165 return; 3166 } 3167 3168 static void 3169 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3170 { 3171 struct ecore_dev *cdev; 3172 uint32_t offset; 3173 3174 cdev = &ha->cdev; 3175 3176 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3177 3178 bus_write_4(ha->pci_dbells, offset, value); 3179 bus_barrier(ha->pci_reg, 0, 0, 
BUS_SPACE_BARRIER_READ); 3180 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3181 3182 return; 3183 } 3184 3185 static uint32_t 3186 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3187 { 3188 struct ether_vlan_header *eh = NULL; 3189 struct ip *ip = NULL; 3190 struct ip6_hdr *ip6 = NULL; 3191 struct tcphdr *th = NULL; 3192 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3193 uint16_t etype = 0; 3194 device_t dev; 3195 uint8_t buf[sizeof(struct ip6_hdr)]; 3196 3197 dev = ha->pci_dev; 3198 3199 eh = mtod(mp, struct ether_vlan_header *); 3200 3201 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3202 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3203 etype = ntohs(eh->evl_proto); 3204 } else { 3205 ehdrlen = ETHER_HDR_LEN; 3206 etype = ntohs(eh->evl_encap_proto); 3207 } 3208 3209 switch (etype) { 3210 case ETHERTYPE_IP: 3211 ip = (struct ip *)(mp->m_data + ehdrlen); 3212 3213 ip_hlen = sizeof (struct ip); 3214 3215 if (mp->m_len < (ehdrlen + ip_hlen)) { 3216 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3217 ip = (struct ip *)buf; 3218 } 3219 3220 th = (struct tcphdr *)(ip + 1); 3221 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3222 break; 3223 3224 case ETHERTYPE_IPV6: 3225 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3226 3227 ip_hlen = sizeof(struct ip6_hdr); 3228 3229 if (mp->m_len < (ehdrlen + ip_hlen)) { 3230 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3231 buf); 3232 ip6 = (struct ip6_hdr *)buf; 3233 } 3234 th = (struct tcphdr *)(ip6 + 1); 3235 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3236 break; 3237 3238 default: 3239 break; 3240 } 3241 3242 return (offset); 3243 } 3244 3245 static __inline int 3246 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3247 uint32_t offset) 3248 { 3249 int i; 3250 uint32_t sum, nbds_in_hdr = 1; 3251 uint32_t window; 3252 bus_dma_segment_t *s_seg; 3253 3254 /* If the header spans mulitple segments, skip those segments */ 3255 3256 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3257 return (0); 3258 3259 i = 0; 3260 3261 while ((i < nsegs) && (offset >= segs->ds_len)) { 3262 offset = offset - segs->ds_len; 3263 segs++; 3264 i++; 3265 nbds_in_hdr++; 3266 } 3267 3268 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3269 3270 nsegs = nsegs - i; 3271 3272 while (nsegs >= window) { 3273 sum = 0; 3274 s_seg = segs; 3275 3276 for (i = 0; i < window; i++){ 3277 sum += s_seg->ds_len; 3278 s_seg++; 3279 } 3280 3281 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3282 fp->tx_lso_wnd_min_len++; 3283 return (-1); 3284 } 3285 3286 nsegs = nsegs - 1; 3287 segs++; 3288 } 3289 3290 return (0); 3291 } 3292 3293 static int 3294 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3295 { 3296 bus_dma_segment_t *segs; 3297 bus_dmamap_t map = 0; 3298 uint32_t nsegs = 0; 3299 int ret = -1; 3300 struct mbuf *m_head = *m_headp; 3301 uint16_t idx = 0; 3302 uint16_t elem_left; 3303 3304 uint8_t nbd = 0; 3305 struct qlnx_tx_queue *txq; 3306 3307 struct eth_tx_1st_bd *first_bd; 3308 struct eth_tx_2nd_bd *second_bd; 3309 struct eth_tx_3rd_bd *third_bd; 3310 struct eth_tx_bd *tx_data_bd; 3311 3312 int seg_idx = 0; 3313 uint32_t nbds_in_hdr = 0; 3314 uint32_t offset = 0; 3315 3316 #ifdef QLNX_TRACE_PERF_DATA 3317 uint16_t bd_used; 3318 #endif 3319 3320 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3321 3322 if (!ha->link_up) 3323 return (-1); 3324 3325 first_bd = NULL; 3326 second_bd = NULL; 3327 third_bd = NULL; 3328 tx_data_bd = NULL; 3329 3330 txq = fp->txq[0]; 3331 3332 if ((int)(elem_left = 
ecore_chain_get_elem_left(&txq->tx_pbl)) < 3333 QLNX_TX_ELEM_MIN_THRESH) { 3334 fp->tx_nsegs_gt_elem_left++; 3335 fp->err_tx_nsegs_gt_elem_left++; 3336 3337 return (ENOBUFS); 3338 } 3339 3340 idx = txq->sw_tx_prod; 3341 3342 map = txq->sw_tx_ring[idx].map; 3343 segs = txq->segs; 3344 3345 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3346 BUS_DMA_NOWAIT); 3347 3348 if (ha->dbg_trace_tso_pkt_len) { 3349 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3350 if (!fp->tx_tso_min_pkt_len) { 3351 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3352 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3353 } else { 3354 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3355 fp->tx_tso_min_pkt_len = 3356 m_head->m_pkthdr.len; 3357 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3358 fp->tx_tso_max_pkt_len = 3359 m_head->m_pkthdr.len; 3360 } 3361 } 3362 } 3363 3364 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3365 offset = qlnx_tcp_offset(ha, m_head); 3366 3367 if ((ret == EFBIG) || 3368 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3369 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3370 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3371 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3372 struct mbuf *m; 3373 3374 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3375 3376 fp->tx_defrag++; 3377 3378 m = m_defrag(m_head, M_NOWAIT); 3379 if (m == NULL) { 3380 fp->err_tx_defrag++; 3381 fp->tx_pkts_freed++; 3382 m_freem(m_head); 3383 *m_headp = NULL; 3384 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3385 return (ENOBUFS); 3386 } 3387 3388 m_head = m; 3389 *m_headp = m_head; 3390 3391 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3392 segs, &nsegs, BUS_DMA_NOWAIT))) { 3393 fp->err_tx_defrag_dmamap_load++; 3394 3395 QL_DPRINT1(ha, 3396 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3397 ret, m_head->m_pkthdr.len); 3398 3399 fp->tx_pkts_freed++; 3400 m_freem(m_head); 3401 *m_headp = NULL; 3402 3403 return (ret); 3404 } 3405 3406 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3407 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3408 fp->err_tx_non_tso_max_seg++; 3409 3410 QL_DPRINT1(ha, 3411 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3412 ret, nsegs, m_head->m_pkthdr.len); 3413 3414 fp->tx_pkts_freed++; 3415 m_freem(m_head); 3416 *m_headp = NULL; 3417 3418 return (ret); 3419 } 3420 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3421 offset = qlnx_tcp_offset(ha, m_head); 3422 3423 } else if (ret) { 3424 fp->err_tx_dmamap_load++; 3425 3426 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3427 ret, m_head->m_pkthdr.len); 3428 fp->tx_pkts_freed++; 3429 m_freem(m_head); 3430 *m_headp = NULL; 3431 return (ret); 3432 } 3433 3434 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3435 3436 if (ha->dbg_trace_tso_pkt_len) { 3437 if (nsegs < QLNX_FP_MAX_SEGS) 3438 fp->tx_pkts[(nsegs - 1)]++; 3439 else 3440 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3441 } 3442 3443 #ifdef QLNX_TRACE_PERF_DATA 3444 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3445 if(m_head->m_pkthdr.len <= 2048) 3446 fp->tx_pkts_hist[0]++; 3447 else if((m_head->m_pkthdr.len > 2048) && 3448 (m_head->m_pkthdr.len <= 4096)) 3449 fp->tx_pkts_hist[1]++; 3450 else if((m_head->m_pkthdr.len > 4096) && 3451 (m_head->m_pkthdr.len <= 8192)) 3452 fp->tx_pkts_hist[2]++; 3453 else if((m_head->m_pkthdr.len > 8192) && 3454 (m_head->m_pkthdr.len <= 12288 )) 3455 fp->tx_pkts_hist[3]++; 3456 else if((m_head->m_pkthdr.len > 11288) && 3457 (m_head->m_pkthdr.len <= 16394)) 3458 fp->tx_pkts_hist[4]++; 3459 else 
if((m_head->m_pkthdr.len > 16384) && 3460 (m_head->m_pkthdr.len <= 20480)) 3461 fp->tx_pkts_hist[5]++; 3462 else if((m_head->m_pkthdr.len > 20480) && 3463 (m_head->m_pkthdr.len <= 24576)) 3464 fp->tx_pkts_hist[6]++; 3465 else if((m_head->m_pkthdr.len > 24576) && 3466 (m_head->m_pkthdr.len <= 28672)) 3467 fp->tx_pkts_hist[7]++; 3468 else if((m_head->m_pkthdr.len > 28762) && 3469 (m_head->m_pkthdr.len <= 32768)) 3470 fp->tx_pkts_hist[8]++; 3471 else if((m_head->m_pkthdr.len > 32768) && 3472 (m_head->m_pkthdr.len <= 36864)) 3473 fp->tx_pkts_hist[9]++; 3474 else if((m_head->m_pkthdr.len > 36864) && 3475 (m_head->m_pkthdr.len <= 40960)) 3476 fp->tx_pkts_hist[10]++; 3477 else if((m_head->m_pkthdr.len > 40960) && 3478 (m_head->m_pkthdr.len <= 45056)) 3479 fp->tx_pkts_hist[11]++; 3480 else if((m_head->m_pkthdr.len > 45056) && 3481 (m_head->m_pkthdr.len <= 49152)) 3482 fp->tx_pkts_hist[12]++; 3483 else if((m_head->m_pkthdr.len > 49512) && 3484 m_head->m_pkthdr.len <= 53248)) 3485 fp->tx_pkts_hist[13]++; 3486 else if((m_head->m_pkthdr.len > 53248) && 3487 (m_head->m_pkthdr.len <= 57344)) 3488 fp->tx_pkts_hist[14]++; 3489 else if((m_head->m_pkthdr.len > 53248) && 3490 (m_head->m_pkthdr.len <= 57344)) 3491 fp->tx_pkts_hist[15]++; 3492 else if((m_head->m_pkthdr.len > 57344) && 3493 (m_head->m_pkthdr.len <= 61440)) 3494 fp->tx_pkts_hist[16]++; 3495 else 3496 fp->tx_pkts_hist[17]++; 3497 } 3498 3499 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3500 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3501 bd_used = TX_RING_SIZE - elem_left; 3502 3503 if(bd_used <= 100) 3504 fp->tx_pkts_q[0]++; 3505 else if((bd_used > 100) && (bd_used <= 500)) 3506 fp->tx_pkts_q[1]++; 3507 else if((bd_used > 500) && (bd_used <= 1000)) 3508 fp->tx_pkts_q[2]++; 3509 else if((bd_used > 1000) && (bd_used <= 2000)) 3510 fp->tx_pkts_q[3]++; 3511 else if((bd_used > 3000) && (bd_used <= 4000)) 3512 fp->tx_pkts_q[4]++; 3513 else if((bd_used > 4000) && (bd_used <= 5000)) 3514 fp->tx_pkts_q[5]++; 3515 else if((bd_used > 6000) && (bd_used <= 7000)) 3516 fp->tx_pkts_q[6]++; 3517 else if((bd_used > 7000) && (bd_used <= 8000)) 3518 fp->tx_pkts_q[7]++; 3519 else if((bd_used > 8000) && (bd_used <= 9000)) 3520 fp->tx_pkts_q[8]++; 3521 else if((bd_used > 9000) && (bd_used <= 10000)) 3522 fp->tx_pkts_q[9]++; 3523 else if((bd_used > 10000) && (bd_used <= 11000)) 3524 fp->tx_pkts_q[10]++; 3525 else if((bd_used > 11000) && (bd_used <= 12000)) 3526 fp->tx_pkts_q[11]++; 3527 else if((bd_used > 12000) && (bd_used <= 13000)) 3528 fp->tx_pkts_q[12]++; 3529 else if((bd_used > 13000) && (bd_used <= 14000)) 3530 fp->tx_pkts_q[13]++; 3531 else if((bd_used > 14000) && (bd_used <= 15000)) 3532 fp->tx_pkts_q[14]++; 3533 else if((bd_used > 15000) && (bd_used <= 16000)) 3534 fp->tx_pkts_q[15]++; 3535 else 3536 fp->tx_pkts_q[16]++; 3537 } 3538 3539 #endif /* end of QLNX_TRACE_PERF_DATA */ 3540 3541 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3542 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3543 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs" 3544 " in chain[%d] trying to free packets\n", 3545 nsegs, elem_left, fp->rss_id); 3546 3547 fp->tx_nsegs_gt_elem_left++; 3548 3549 (void)qlnx_tx_int(ha, fp, txq); 3550 3551 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3552 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3553 QL_DPRINT1(ha, 3554 "(%d, 0x%x) insuffient BDs in chain[%d]\n", 3555 nsegs, elem_left, fp->rss_id); 3556 3557 fp->err_tx_nsegs_gt_elem_left++; 3558 fp->tx_ring_full = 1; 3559 if (ha->storm_stats_enable) 3560 ha->storm_stats_gather = 1; 
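/* Still not enough BD ring elements even after reaping completions; return ENOBUFS so the caller can put the mbuf back on its buf_ring and retry later. */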
3561 return (ENOBUFS); 3562 } 3563 } 3564 3565 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3566 3567 txq->sw_tx_ring[idx].mp = m_head; 3568 3569 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3570 3571 memset(first_bd, 0, sizeof(*first_bd)); 3572 3573 first_bd->data.bd_flags.bitfields = 3574 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3575 3576 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3577 3578 nbd++; 3579 3580 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3581 first_bd->data.bd_flags.bitfields |= 3582 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3583 } 3584 3585 if (m_head->m_pkthdr.csum_flags & 3586 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3587 first_bd->data.bd_flags.bitfields |= 3588 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3589 } 3590 3591 if (m_head->m_flags & M_VLANTAG) { 3592 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3593 first_bd->data.bd_flags.bitfields |= 3594 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3595 } 3596 3597 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3598 first_bd->data.bd_flags.bitfields |= 3599 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3600 first_bd->data.bd_flags.bitfields |= 3601 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3602 3603 nbds_in_hdr = 1; 3604 3605 if (offset == segs->ds_len) { 3606 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3607 segs++; 3608 seg_idx++; 3609 3610 second_bd = (struct eth_tx_2nd_bd *) 3611 ecore_chain_produce(&txq->tx_pbl); 3612 memset(second_bd, 0, sizeof(*second_bd)); 3613 nbd++; 3614 3615 if (seg_idx < nsegs) { 3616 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3617 (segs->ds_addr), (segs->ds_len)); 3618 segs++; 3619 seg_idx++; 3620 } 3621 3622 third_bd = (struct eth_tx_3rd_bd *) 3623 ecore_chain_produce(&txq->tx_pbl); 3624 memset(third_bd, 0, sizeof(*third_bd)); 3625 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3626 third_bd->data.bitfields |= 3627 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3628 nbd++; 3629 3630 if (seg_idx < nsegs) { 3631 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3632 (segs->ds_addr), (segs->ds_len)); 3633 segs++; 3634 seg_idx++; 3635 } 3636 3637 for (; seg_idx < nsegs; seg_idx++) { 3638 tx_data_bd = (struct eth_tx_bd *) 3639 ecore_chain_produce(&txq->tx_pbl); 3640 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3641 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3642 segs->ds_addr,\ 3643 segs->ds_len); 3644 segs++; 3645 nbd++; 3646 } 3647 3648 } else if (offset < segs->ds_len) { 3649 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3650 3651 second_bd = (struct eth_tx_2nd_bd *) 3652 ecore_chain_produce(&txq->tx_pbl); 3653 memset(second_bd, 0, sizeof(*second_bd)); 3654 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3655 (segs->ds_addr + offset),\ 3656 (segs->ds_len - offset)); 3657 nbd++; 3658 segs++; 3659 3660 third_bd = (struct eth_tx_3rd_bd *) 3661 ecore_chain_produce(&txq->tx_pbl); 3662 memset(third_bd, 0, sizeof(*third_bd)); 3663 3664 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3665 segs->ds_addr,\ 3666 segs->ds_len); 3667 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3668 third_bd->data.bitfields |= 3669 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3670 segs++; 3671 nbd++; 3672 3673 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3674 tx_data_bd = (struct eth_tx_bd *) 3675 ecore_chain_produce(&txq->tx_pbl); 3676 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3677 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3678 segs->ds_addr,\ 3679 segs->ds_len); 3680 segs++; 3681 nbd++; 3682 } 3683 3684 } else { 3685 offset = offset - segs->ds_len; 3686 segs++; 3687 3688 
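/* The header spans the entire first segment (and possibly more): walk the remaining segments, splitting the segment in which the header ends so that header bytes and payload land in separate BDs, and count the header BDs in nbds_in_hdr for the 3rd BD. */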
for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3689 if (offset) 3690 nbds_in_hdr++; 3691 3692 tx_data_bd = (struct eth_tx_bd *) 3693 ecore_chain_produce(&txq->tx_pbl); 3694 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3695 3696 if (second_bd == NULL) { 3697 second_bd = (struct eth_tx_2nd_bd *) 3698 tx_data_bd; 3699 } else if (third_bd == NULL) { 3700 third_bd = (struct eth_tx_3rd_bd *) 3701 tx_data_bd; 3702 } 3703 3704 if (offset && (offset < segs->ds_len)) { 3705 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3706 segs->ds_addr, offset); 3707 3708 tx_data_bd = (struct eth_tx_bd *) 3709 ecore_chain_produce(&txq->tx_pbl); 3710 3711 memset(tx_data_bd, 0, 3712 sizeof(*tx_data_bd)); 3713 3714 if (second_bd == NULL) { 3715 second_bd = 3716 (struct eth_tx_2nd_bd *)tx_data_bd; 3717 } else if (third_bd == NULL) { 3718 third_bd = 3719 (struct eth_tx_3rd_bd *)tx_data_bd; 3720 } 3721 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3722 (segs->ds_addr + offset), \ 3723 (segs->ds_len - offset)); 3724 nbd++; 3725 offset = 0; 3726 } else { 3727 if (offset) 3728 offset = offset - segs->ds_len; 3729 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3730 segs->ds_addr, segs->ds_len); 3731 } 3732 segs++; 3733 nbd++; 3734 } 3735 3736 if (third_bd == NULL) { 3737 third_bd = (struct eth_tx_3rd_bd *) 3738 ecore_chain_produce(&txq->tx_pbl); 3739 memset(third_bd, 0, sizeof(*third_bd)); 3740 } 3741 3742 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3743 third_bd->data.bitfields |= 3744 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3745 } 3746 fp->tx_tso_pkts++; 3747 } else { 3748 segs++; 3749 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3750 tx_data_bd = (struct eth_tx_bd *) 3751 ecore_chain_produce(&txq->tx_pbl); 3752 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3753 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3754 segs->ds_len); 3755 segs++; 3756 nbd++; 3757 } 3758 first_bd->data.bitfields = 3759 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3760 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3761 first_bd->data.bitfields = 3762 htole16(first_bd->data.bitfields); 3763 fp->tx_non_tso_pkts++; 3764 } 3765 3766 first_bd->data.nbds = nbd; 3767 3768 if (ha->dbg_trace_tso_pkt_len) { 3769 if (fp->tx_tso_max_nsegs < nsegs) 3770 fp->tx_tso_max_nsegs = nsegs; 3771 3772 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3773 fp->tx_tso_min_nsegs = nsegs; 3774 } 3775 3776 txq->sw_tx_ring[idx].nsegs = nsegs; 3777 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3778 3779 txq->tx_db.data.bd_prod = 3780 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3781 3782 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3783 3784 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3785 return (0); 3786 } 3787 3788 static void 3789 qlnx_stop(qlnx_host_t *ha) 3790 { 3791 struct ifnet *ifp = ha->ifp; 3792 device_t dev; 3793 int i; 3794 3795 dev = ha->pci_dev; 3796 3797 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3798 3799 /* 3800 * We simply lock and unlock each fp->tx_mtx to 3801 * propagate the if_drv_flags 3802 * state to each tx thread 3803 */ 3804 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3805 3806 if (ha->state == QLNX_STATE_OPEN) { 3807 for (i = 0; i < ha->num_rss; i++) { 3808 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3809 3810 mtx_lock(&fp->tx_mtx); 3811 mtx_unlock(&fp->tx_mtx); 3812 3813 if (fp->fp_taskqueue != NULL) 3814 taskqueue_enqueue(fp->fp_taskqueue, 3815 &fp->fp_task); 3816 } 3817 } 3818 #ifdef QLNX_ENABLE_IWARP 3819 if (qlnx_vf_device(ha) != 0) { 3820 qlnx_rdma_dev_close(ha); 3821 } 3822 
#endif /* #ifdef QLNX_ENABLE_IWARP */ 3823 3824 qlnx_unload(ha); 3825 3826 return; 3827 } 3828 3829 static int 3830 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3831 { 3832 return(TX_RING_SIZE - 1); 3833 } 3834 3835 uint8_t * 3836 qlnx_get_mac_addr(qlnx_host_t *ha) 3837 { 3838 struct ecore_hwfn *p_hwfn; 3839 unsigned char mac[ETHER_ADDR_LEN]; 3840 uint8_t p_is_forced; 3841 3842 p_hwfn = &ha->cdev.hwfns[0]; 3843 3844 if (qlnx_vf_device(ha) != 0) 3845 return (p_hwfn->hw_info.hw_mac_addr); 3846 3847 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3848 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3849 true) { 3850 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3851 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3852 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3853 memcpy(ha->primary_mac, mac, ETH_ALEN); 3854 } 3855 3856 return (ha->primary_mac); 3857 } 3858 3859 static uint32_t 3860 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3861 { 3862 uint32_t ifm_type = 0; 3863 3864 switch (if_link->media_type) { 3865 case MEDIA_MODULE_FIBER: 3866 case MEDIA_UNSPECIFIED: 3867 if (if_link->speed == (100 * 1000)) 3868 ifm_type = QLNX_IFM_100G_SR4; 3869 else if (if_link->speed == (40 * 1000)) 3870 ifm_type = IFM_40G_SR4; 3871 else if (if_link->speed == (25 * 1000)) 3872 ifm_type = QLNX_IFM_25G_SR; 3873 else if (if_link->speed == (10 * 1000)) 3874 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3875 else if (if_link->speed == (1 * 1000)) 3876 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3877 3878 break; 3879 3880 case MEDIA_DA_TWINAX: 3881 if (if_link->speed == (100 * 1000)) 3882 ifm_type = QLNX_IFM_100G_CR4; 3883 else if (if_link->speed == (40 * 1000)) 3884 ifm_type = IFM_40G_CR4; 3885 else if (if_link->speed == (25 * 1000)) 3886 ifm_type = QLNX_IFM_25G_CR; 3887 else if (if_link->speed == (10 * 1000)) 3888 ifm_type = IFM_10G_TWINAX; 3889 3890 break; 3891 3892 default : 3893 ifm_type = IFM_UNKNOWN; 3894 break; 3895 } 3896 return (ifm_type); 3897 } 3898 3899 /***************************************************************************** 3900 * Interrupt Service Functions 3901 *****************************************************************************/ 3902 3903 static int 3904 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3905 struct mbuf *mp_head, uint16_t len) 3906 { 3907 struct mbuf *mp, *mpf, *mpl; 3908 struct sw_rx_data *sw_rx_data; 3909 struct qlnx_rx_queue *rxq; 3910 uint16_t len_in_buffer; 3911 3912 rxq = fp->rxq; 3913 mpf = mpl = mp = NULL; 3914 3915 while (len) { 3916 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3917 3918 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3919 mp = sw_rx_data->data; 3920 3921 if (mp == NULL) { 3922 QL_DPRINT1(ha, "mp = NULL\n"); 3923 fp->err_rx_mp_null++; 3924 rxq->sw_rx_cons = 3925 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3926 3927 if (mpf != NULL) 3928 m_freem(mpf); 3929 3930 return (-1); 3931 } 3932 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3933 BUS_DMASYNC_POSTREAD); 3934 3935 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3936 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3937 " incoming packet and reusing its buffer\n"); 3938 3939 qlnx_reuse_rx_data(rxq); 3940 fp->err_rx_alloc_errors++; 3941 3942 if (mpf != NULL) 3943 m_freem(mpf); 3944 3945 return (-1); 3946 } 3947 ecore_chain_consume(&rxq->rx_bd_ring); 3948 3949 if (len > rxq->rx_buf_size) 3950 len_in_buffer = rxq->rx_buf_size; 3951 else 3952 len_in_buffer = len; 3953 3954 len = len - len_in_buffer; 3955 3956 
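/* Each posted Rx buffer holds at most rx_buf_size bytes of the jumbo frame; strip the packet-header flag from this continuation mbuf and append it to the chain. */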
mp->m_flags &= ~M_PKTHDR; 3957 mp->m_next = NULL; 3958 mp->m_len = len_in_buffer; 3959 3960 if (mpf == NULL) 3961 mpf = mpl = mp; 3962 else { 3963 mpl->m_next = mp; 3964 mpl = mp; 3965 } 3966 } 3967 3968 if (mpf != NULL) 3969 mp_head->m_next = mpf; 3970 3971 return (0); 3972 } 3973 3974 static void 3975 qlnx_tpa_start(qlnx_host_t *ha, 3976 struct qlnx_fastpath *fp, 3977 struct qlnx_rx_queue *rxq, 3978 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3979 { 3980 uint32_t agg_index; 3981 struct ifnet *ifp = ha->ifp; 3982 struct mbuf *mp; 3983 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3984 struct sw_rx_data *sw_rx_data; 3985 dma_addr_t addr; 3986 bus_dmamap_t map; 3987 struct eth_rx_bd *rx_bd; 3988 int i; 3989 device_t dev; 3990 #if __FreeBSD_version >= 1100000 3991 uint8_t hash_type; 3992 #endif /* #if __FreeBSD_version >= 1100000 */ 3993 3994 dev = ha->pci_dev; 3995 agg_index = cqe->tpa_agg_index; 3996 3997 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3998 \t type = 0x%x\n \ 3999 \t bitfields = 0x%x\n \ 4000 \t seg_len = 0x%x\n \ 4001 \t pars_flags = 0x%x\n \ 4002 \t vlan_tag = 0x%x\n \ 4003 \t rss_hash = 0x%x\n \ 4004 \t len_on_first_bd = 0x%x\n \ 4005 \t placement_offset = 0x%x\n \ 4006 \t tpa_agg_index = 0x%x\n \ 4007 \t header_len = 0x%x\n \ 4008 \t ext_bd_len_list[0] = 0x%x\n \ 4009 \t ext_bd_len_list[1] = 0x%x\n \ 4010 \t ext_bd_len_list[2] = 0x%x\n \ 4011 \t ext_bd_len_list[3] = 0x%x\n \ 4012 \t ext_bd_len_list[4] = 0x%x\n", 4013 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 4014 cqe->pars_flags.flags, cqe->vlan_tag, 4015 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 4016 cqe->tpa_agg_index, cqe->header_len, 4017 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 4018 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 4019 cqe->ext_bd_len_list[4]); 4020 4021 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4022 fp->err_rx_tpa_invalid_agg_num++; 4023 return; 4024 } 4025 4026 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4027 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 4028 mp = sw_rx_data->data; 4029 4030 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 4031 4032 if (mp == NULL) { 4033 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 4034 fp->err_rx_mp_null++; 4035 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4036 4037 return; 4038 } 4039 4040 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 4041 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 4042 " flags = %x, dropping incoming packet\n", fp->rss_id, 4043 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 4044 4045 fp->err_rx_hw_errors++; 4046 4047 qlnx_reuse_rx_data(rxq); 4048 4049 QLNX_INC_IERRORS(ifp); 4050 4051 return; 4052 } 4053 4054 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4055 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4056 " dropping incoming packet and reusing its buffer\n", 4057 fp->rss_id); 4058 4059 fp->err_rx_alloc_errors++; 4060 QLNX_INC_IQDROPS(ifp); 4061 4062 /* 4063 * Load the tpa mbuf into the rx ring and save the 4064 * posted mbuf 4065 */ 4066 4067 map = sw_rx_data->map; 4068 addr = sw_rx_data->dma_addr; 4069 4070 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4071 4072 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4073 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4074 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4075 4076 rxq->tpa_info[agg_index].rx_buf.data = mp; 4077 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4078 rxq->tpa_info[agg_index].rx_buf.map = map; 4079 4080 rx_bd = 
(struct eth_rx_bd *) 4081 ecore_chain_produce(&rxq->rx_bd_ring); 4082 4083 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4084 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4085 4086 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4087 BUS_DMASYNC_PREREAD); 4088 4089 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4090 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4091 4092 ecore_chain_consume(&rxq->rx_bd_ring); 4093 4094 /* Now reuse any buffers posted in ext_bd_len_list */ 4095 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4096 if (cqe->ext_bd_len_list[i] == 0) 4097 break; 4098 4099 qlnx_reuse_rx_data(rxq); 4100 } 4101 4102 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4103 return; 4104 } 4105 4106 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4107 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4108 " dropping incoming packet and reusing its buffer\n", 4109 fp->rss_id); 4110 4111 QLNX_INC_IQDROPS(ifp); 4112 4113 /* if we already have mbuf head in aggregation free it */ 4114 if (rxq->tpa_info[agg_index].mpf) { 4115 m_freem(rxq->tpa_info[agg_index].mpf); 4116 rxq->tpa_info[agg_index].mpl = NULL; 4117 } 4118 rxq->tpa_info[agg_index].mpf = mp; 4119 rxq->tpa_info[agg_index].mpl = NULL; 4120 4121 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4122 ecore_chain_consume(&rxq->rx_bd_ring); 4123 4124 /* Now reuse any buffers posted in ext_bd_len_list */ 4125 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4126 if (cqe->ext_bd_len_list[i] == 0) 4127 break; 4128 4129 qlnx_reuse_rx_data(rxq); 4130 } 4131 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4132 4133 return; 4134 } 4135 4136 /* 4137 * first process the ext_bd_len_list 4138 * if this fails then we simply drop the packet 4139 */ 4140 ecore_chain_consume(&rxq->rx_bd_ring); 4141 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4142 4143 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4144 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4145 4146 if (cqe->ext_bd_len_list[i] == 0) 4147 break; 4148 4149 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4150 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4151 BUS_DMASYNC_POSTREAD); 4152 4153 mpc = sw_rx_data->data; 4154 4155 if (mpc == NULL) { 4156 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4157 fp->err_rx_mp_null++; 4158 if (mpf != NULL) 4159 m_freem(mpf); 4160 mpf = mpl = NULL; 4161 rxq->tpa_info[agg_index].agg_state = 4162 QLNX_AGG_STATE_ERROR; 4163 ecore_chain_consume(&rxq->rx_bd_ring); 4164 rxq->sw_rx_cons = 4165 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4166 continue; 4167 } 4168 4169 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4170 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4171 " dropping incoming packet and reusing its" 4172 " buffer\n", fp->rss_id); 4173 4174 qlnx_reuse_rx_data(rxq); 4175 4176 if (mpf != NULL) 4177 m_freem(mpf); 4178 mpf = mpl = NULL; 4179 4180 rxq->tpa_info[agg_index].agg_state = 4181 QLNX_AGG_STATE_ERROR; 4182 4183 ecore_chain_consume(&rxq->rx_bd_ring); 4184 rxq->sw_rx_cons = 4185 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4186 4187 continue; 4188 } 4189 4190 mpc->m_flags &= ~M_PKTHDR; 4191 mpc->m_next = NULL; 4192 mpc->m_len = cqe->ext_bd_len_list[i]; 4193 4194 if (mpf == NULL) { 4195 mpf = mpl = mpc; 4196 } else { 4197 mpl->m_len = ha->rx_buf_size; 4198 mpl->m_next = mpc; 4199 mpl = mpc; 4200 } 4201 4202 ecore_chain_consume(&rxq->rx_bd_ring); 4203 rxq->sw_rx_cons = 4204 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 
4205 }
4206
4207 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) {
4208 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping"
4209 " incoming packet and reusing its buffer\n",
4210 fp->rss_id);
4211
4212 QLNX_INC_IQDROPS(ifp);
4213
4214 rxq->tpa_info[agg_index].mpf = mp;
4215 rxq->tpa_info[agg_index].mpl = NULL;
4216
4217 return;
4218 }
4219
4220 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset;
4221
4222 if (mpf != NULL) {
4223 mp->m_len = ha->rx_buf_size;
4224 mp->m_next = mpf;
4225 rxq->tpa_info[agg_index].mpf = mp;
4226 rxq->tpa_info[agg_index].mpl = mpl;
4227 } else {
4228 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset;
4229 rxq->tpa_info[agg_index].mpf = mp;
4230 rxq->tpa_info[agg_index].mpl = mp;
4231 mp->m_next = NULL;
4232 }
4233
4234 mp->m_flags |= M_PKTHDR;
4235
4236 /* assign packet to this interface */
4237 mp->m_pkthdr.rcvif = ifp;
4238
4239 /* assume no hardware checksum has completed */
4240 mp->m_pkthdr.csum_flags = 0;
4241
4242 //mp->m_pkthdr.flowid = fp->rss_id;
4243 mp->m_pkthdr.flowid = cqe->rss_hash;
4244
4245 #if __FreeBSD_version >= 1100000
4246
4247 hash_type = cqe->bitfields &
4248 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4249 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4250
4251 switch (hash_type) {
4252 case RSS_HASH_TYPE_IPV4:
4253 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4254 break;
4255
4256 case RSS_HASH_TYPE_TCP_IPV4:
4257 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4258 break;
4259
4260 case RSS_HASH_TYPE_IPV6:
4261 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4262 break;
4263
4264 case RSS_HASH_TYPE_TCP_IPV6:
4265 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4266 break;
4267
4268 default:
4269 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4270 break;
4271 }
4272
4273 #else
4274 mp->m_flags |= M_FLOWID;
4275 #endif
4276
4277 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID |
4278 CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4279
4280 mp->m_pkthdr.csum_data = 0xFFFF;
4281
4282 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) {
4283 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag);
4284 mp->m_flags |= M_VLANTAG;
4285 }
4286
4287 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START;
4288
4289 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n",
4290 fp->rss_id, rxq->tpa_info[agg_index].agg_state,
4291 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl);
4292
4293 return;
4294 }
4295
4296 static void
4297 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp,
4298 struct qlnx_rx_queue *rxq,
4299 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
4300 {
4301 struct sw_rx_data *sw_rx_data;
4302 int i;
4303 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL;
4304 struct mbuf *mp;
4305 uint32_t agg_index;
4306 device_t dev;
4307
4308 dev = ha->pci_dev;
4309
4310 QL_DPRINT7(ha, "[%d]: enter\n \
4311 \t type = 0x%x\n \
4312 \t tpa_agg_index = 0x%x\n \
4313 \t len_list[0] = 0x%x\n \
4314 \t len_list[1] = 0x%x\n \
4315 \t len_list[2] = 0x%x\n \
4316 \t len_list[3] = 0x%x\n \
4317 \t len_list[4] = 0x%x\n \
4318 \t len_list[5] = 0x%x\n",
4319 fp->rss_id, cqe->type, cqe->tpa_agg_index,
4320 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2],
4321 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]);
4322
4323 agg_index = cqe->tpa_agg_index;
4324
4325 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) {
4326 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id);
4327 fp->err_rx_tpa_invalid_agg_num++;
4328 return;
4329 }
4330
4331 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) {
4332 QL_DPRINT7(ha, "[%d]: 1\n ",
fp->rss_id); 4333 4334 if (cqe->len_list[i] == 0) 4335 break; 4336 4337 if (rxq->tpa_info[agg_index].agg_state != 4338 QLNX_AGG_STATE_START) { 4339 qlnx_reuse_rx_data(rxq); 4340 continue; 4341 } 4342 4343 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4344 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4345 BUS_DMASYNC_POSTREAD); 4346 4347 mpc = sw_rx_data->data; 4348 4349 if (mpc == NULL) { 4350 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4351 4352 fp->err_rx_mp_null++; 4353 if (mpf != NULL) 4354 m_freem(mpf); 4355 mpf = mpl = NULL; 4356 rxq->tpa_info[agg_index].agg_state = 4357 QLNX_AGG_STATE_ERROR; 4358 ecore_chain_consume(&rxq->rx_bd_ring); 4359 rxq->sw_rx_cons = 4360 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4361 continue; 4362 } 4363 4364 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4365 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4366 " dropping incoming packet and reusing its" 4367 " buffer\n", fp->rss_id); 4368 4369 qlnx_reuse_rx_data(rxq); 4370 4371 if (mpf != NULL) 4372 m_freem(mpf); 4373 mpf = mpl = NULL; 4374 4375 rxq->tpa_info[agg_index].agg_state = 4376 QLNX_AGG_STATE_ERROR; 4377 4378 ecore_chain_consume(&rxq->rx_bd_ring); 4379 rxq->sw_rx_cons = 4380 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4381 4382 continue; 4383 } 4384 4385 mpc->m_flags &= ~M_PKTHDR; 4386 mpc->m_next = NULL; 4387 mpc->m_len = cqe->len_list[i]; 4388 4389 if (mpf == NULL) { 4390 mpf = mpl = mpc; 4391 } else { 4392 mpl->m_len = ha->rx_buf_size; 4393 mpl->m_next = mpc; 4394 mpl = mpc; 4395 } 4396 4397 ecore_chain_consume(&rxq->rx_bd_ring); 4398 rxq->sw_rx_cons = 4399 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4400 } 4401 4402 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4403 fp->rss_id, mpf, mpl); 4404 4405 if (mpf != NULL) { 4406 mp = rxq->tpa_info[agg_index].mpl; 4407 mp->m_len = ha->rx_buf_size; 4408 mp->m_next = mpf; 4409 rxq->tpa_info[agg_index].mpl = mpl; 4410 } 4411 4412 return; 4413 } 4414 4415 static int 4416 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4417 struct qlnx_rx_queue *rxq, 4418 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4419 { 4420 struct sw_rx_data *sw_rx_data; 4421 int i; 4422 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4423 struct mbuf *mp; 4424 uint32_t agg_index; 4425 uint32_t len = 0; 4426 struct ifnet *ifp = ha->ifp; 4427 device_t dev; 4428 4429 dev = ha->pci_dev; 4430 4431 QL_DPRINT7(ha, "[%d]: enter\n \ 4432 \t type = 0x%x\n \ 4433 \t tpa_agg_index = 0x%x\n \ 4434 \t total_packet_len = 0x%x\n \ 4435 \t num_of_bds = 0x%x\n \ 4436 \t end_reason = 0x%x\n \ 4437 \t num_of_coalesced_segs = 0x%x\n \ 4438 \t ts_delta = 0x%x\n \ 4439 \t len_list[0] = 0x%x\n \ 4440 \t len_list[1] = 0x%x\n \ 4441 \t len_list[2] = 0x%x\n \ 4442 \t len_list[3] = 0x%x\n", 4443 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4444 cqe->total_packet_len, cqe->num_of_bds, 4445 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4446 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4447 cqe->len_list[3]); 4448 4449 agg_index = cqe->tpa_agg_index; 4450 4451 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4452 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4453 4454 fp->err_rx_tpa_invalid_agg_num++; 4455 return (0); 4456 } 4457 4458 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4459 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4460 4461 if (cqe->len_list[i] == 0) 4462 break; 4463 4464 if (rxq->tpa_info[agg_index].agg_state != 4465 QLNX_AGG_STATE_START) { 4466 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4467 4468 qlnx_reuse_rx_data(rxq); 4469 continue; 4470 } 4471 4472 
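/* Take the mbuf currently posted at the consumer index so it can be
 * chained into this aggregation; qlnx_alloc_rx_buffer() below posts a
 * fresh buffer at the producer index before the consumer is advanced. */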
sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4473 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4474 BUS_DMASYNC_POSTREAD); 4475 4476 mpc = sw_rx_data->data; 4477 4478 if (mpc == NULL) { 4479 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4480 4481 fp->err_rx_mp_null++; 4482 if (mpf != NULL) 4483 m_freem(mpf); 4484 mpf = mpl = NULL; 4485 rxq->tpa_info[agg_index].agg_state = 4486 QLNX_AGG_STATE_ERROR; 4487 ecore_chain_consume(&rxq->rx_bd_ring); 4488 rxq->sw_rx_cons = 4489 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4490 continue; 4491 } 4492 4493 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4494 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4495 " dropping incoming packet and reusing its" 4496 " buffer\n", fp->rss_id); 4497 4498 qlnx_reuse_rx_data(rxq); 4499 4500 if (mpf != NULL) 4501 m_freem(mpf); 4502 mpf = mpl = NULL; 4503 4504 rxq->tpa_info[agg_index].agg_state = 4505 QLNX_AGG_STATE_ERROR; 4506 4507 ecore_chain_consume(&rxq->rx_bd_ring); 4508 rxq->sw_rx_cons = 4509 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4510 4511 continue; 4512 } 4513 4514 mpc->m_flags &= ~M_PKTHDR; 4515 mpc->m_next = NULL; 4516 mpc->m_len = cqe->len_list[i]; 4517 4518 if (mpf == NULL) { 4519 mpf = mpl = mpc; 4520 } else { 4521 mpl->m_len = ha->rx_buf_size; 4522 mpl->m_next = mpc; 4523 mpl = mpc; 4524 } 4525 4526 ecore_chain_consume(&rxq->rx_bd_ring); 4527 rxq->sw_rx_cons = 4528 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4529 } 4530 4531 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4532 4533 if (mpf != NULL) { 4534 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4535 4536 mp = rxq->tpa_info[agg_index].mpl; 4537 mp->m_len = ha->rx_buf_size; 4538 mp->m_next = mpf; 4539 } 4540 4541 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4542 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4543 4544 if (rxq->tpa_info[agg_index].mpf != NULL) 4545 m_freem(rxq->tpa_info[agg_index].mpf); 4546 rxq->tpa_info[agg_index].mpf = NULL; 4547 rxq->tpa_info[agg_index].mpl = NULL; 4548 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4549 return (0); 4550 } 4551 4552 mp = rxq->tpa_info[agg_index].mpf; 4553 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4554 mp->m_pkthdr.len = cqe->total_packet_len; 4555 4556 if (mp->m_next == NULL) 4557 mp->m_len = mp->m_pkthdr.len; 4558 else { 4559 /* compute the total packet length */ 4560 mpf = mp; 4561 while (mpf != NULL) { 4562 len += mpf->m_len; 4563 mpf = mpf->m_next; 4564 } 4565 4566 if (cqe->total_packet_len > len) { 4567 mpl = rxq->tpa_info[agg_index].mpl; 4568 mpl->m_len += (cqe->total_packet_len - len); 4569 } 4570 } 4571 4572 QLNX_INC_IPACKETS(ifp); 4573 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4574 4575 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4576 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4577 fp->rss_id, mp->m_pkthdr.csum_data, 4578 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4579 4580 (*ifp->if_input)(ifp, mp); 4581 4582 rxq->tpa_info[agg_index].mpf = NULL; 4583 rxq->tpa_info[agg_index].mpl = NULL; 4584 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4585 4586 return (cqe->num_of_coalesced_segs); 4587 } 4588 4589 static int 4590 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4591 int lro_enable) 4592 { 4593 uint16_t hw_comp_cons, sw_comp_cons; 4594 int rx_pkt = 0; 4595 struct qlnx_rx_queue *rxq = fp->rxq; 4596 struct ifnet *ifp = ha->ifp; 4597 struct ecore_dev *cdev = &ha->cdev; 4598 struct ecore_hwfn *p_hwfn; 4599 4600 #ifdef QLNX_SOFT_LRO 4601 struct lro_ctrl *lro; 4602 4603 lro 
= &rxq->lro;
4604 #endif /* #ifdef QLNX_SOFT_LRO */
4605
4606 hw_comp_cons = le16toh(*rxq->hw_cons_ptr);
4607 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
4608
4609 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)];
4610
4611 /* Memory barrier to prevent the CPU from doing speculative reads of CQE
4612 * / BD in the while-loop before reading hw_comp_cons. If the CQE is
4613 * read before it is written by FW, then FW writes CQE and SB, and then
4614 * the CPU reads the hw_comp_cons, it will use an old CQE.
4615 */
4616
4617 /* Loop to complete all indicated BDs */
4618 while (sw_comp_cons != hw_comp_cons) {
4619 union eth_rx_cqe *cqe;
4620 struct eth_fast_path_rx_reg_cqe *fp_cqe;
4621 struct sw_rx_data *sw_rx_data;
4622 register struct mbuf *mp;
4623 enum eth_rx_cqe_type cqe_type;
4624 uint16_t len, pad, len_on_first_bd;
4625 uint8_t *data;
4626 #if __FreeBSD_version >= 1100000
4627 uint8_t hash_type;
4628 #endif /* #if __FreeBSD_version >= 1100000 */
4629
4630 /* Get the CQE from the completion ring */
4631 cqe = (union eth_rx_cqe *)
4632 ecore_chain_consume(&rxq->rx_comp_ring);
4633 cqe_type = cqe->fast_path_regular.type;
4634
4635 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) {
4636 QL_DPRINT3(ha, "Got a slowpath CQE\n");
4637
4638 ecore_eth_cqe_completion(p_hwfn,
4639 (struct eth_slow_path_rx_cqe *)cqe);
4640 goto next_cqe;
4641 }
4642
4643 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) {
4644 switch (cqe_type) {
4645 case ETH_RX_CQE_TYPE_TPA_START:
4646 qlnx_tpa_start(ha, fp, rxq,
4647 &cqe->fast_path_tpa_start);
4648 fp->tpa_start++;
4649 break;
4650
4651 case ETH_RX_CQE_TYPE_TPA_CONT:
4652 qlnx_tpa_cont(ha, fp, rxq,
4653 &cqe->fast_path_tpa_cont);
4654 fp->tpa_cont++;
4655 break;
4656
4657 case ETH_RX_CQE_TYPE_TPA_END:
4658 rx_pkt += qlnx_tpa_end(ha, fp, rxq,
4659 &cqe->fast_path_tpa_end);
4660 fp->tpa_end++;
4661 break;
4662
4663 default:
4664 break;
4665 }
4666
4667 goto next_cqe;
4668 }
4669
4670 /* Get the data from the SW ring */
4671 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons];
4672 mp = sw_rx_data->data;
4673
4674 if (mp == NULL) {
4675 QL_DPRINT1(ha, "mp = NULL\n");
4676 fp->err_rx_mp_null++;
4677 rxq->sw_rx_cons =
4678 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4679 goto next_cqe;
4680 }
4681 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map,
4682 BUS_DMASYNC_POSTREAD);
4683
4684 /* non GRO */
4685 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */
4686 len = le16toh(fp_cqe->pkt_len);
4687 pad = fp_cqe->placement_offset;
4688 #if 0
4689 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x,"
4690 " len %u, parsing flags = %d pad = %d\n",
4691 cqe_type, fp_cqe->bitfields,
4692 le16toh(fp_cqe->vlan_tag),
4693 len, le16toh(fp_cqe->pars_flags.flags), pad);
4694 #endif
4695 data = mtod(mp, uint8_t *);
4696 data = data + pad;
4697
4698 if (0)
4699 qlnx_dump_buf8(ha, __func__, data, len);
4700
4701 /* For every Rx BD consumed, we allocate a new BD so the BD ring
4702 * always has a fixed size. If allocation fails, we take the
4703 * consumed BD and return it to the ring in the PROD position.
4704 * The packet that was received on that BD will be dropped (and
4705 * not passed to the upper stack).
4706 */
4707 /* If this is an error packet then drop it */
4708 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) &
4709 CQE_FLAGS_ERR) {
4710 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x,"
4711 " dropping incoming packet\n", sw_comp_cons,
4712 le16toh(cqe->fast_path_regular.pars_flags.flags));
4713 fp->err_rx_hw_errors++;
4714
4715 qlnx_reuse_rx_data(rxq);
4716
4717 QLNX_INC_IERRORS(ifp);
4718
4719 goto next_cqe;
4720 }
4721
4722 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) {
4723 QL_DPRINT1(ha, "New buffer allocation failed, dropping"
4724 " incoming packet and reusing its buffer\n");
4725 qlnx_reuse_rx_data(rxq);
4726
4727 fp->err_rx_alloc_errors++;
4728
4729 QLNX_INC_IQDROPS(ifp);
4730
4731 goto next_cqe;
4732 }
4733
4734 ecore_chain_consume(&rxq->rx_bd_ring);
4735
4736 len_on_first_bd = fp_cqe->len_on_first_bd;
4737 m_adj(mp, pad);
4738 mp->m_pkthdr.len = len;
4739
4740 if ((len > 60 ) && (len > len_on_first_bd)) {
4741 mp->m_len = len_on_first_bd;
4742
4743 if (qlnx_rx_jumbo_chain(ha, fp, mp,
4744 (len - len_on_first_bd)) != 0) {
4745 m_freem(mp);
4746
4747 QLNX_INC_IQDROPS(ifp);
4748
4749 goto next_cqe;
4750 }
4751
4752 } else if (len_on_first_bd < len) {
4753 fp->err_rx_jumbo_chain_pkts++;
4754 } else {
4755 mp->m_len = len;
4756 }
4757
4758 mp->m_flags |= M_PKTHDR;
4759
4760 /* assign packet to this interface */
4761 mp->m_pkthdr.rcvif = ifp;
4762
4763 /* assume no hardware checksum has completed */
4764 mp->m_pkthdr.csum_flags = 0;
4765
4766 mp->m_pkthdr.flowid = fp_cqe->rss_hash;
4767
4768 #if __FreeBSD_version >= 1100000
4769
4770 hash_type = fp_cqe->bitfields &
4771 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK <<
4772 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT);
4773
4774 switch (hash_type) {
4775 case RSS_HASH_TYPE_IPV4:
4776 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4);
4777 break;
4778
4779 case RSS_HASH_TYPE_TCP_IPV4:
4780 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4);
4781 break;
4782
4783 case RSS_HASH_TYPE_IPV6:
4784 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6);
4785 break;
4786
4787 case RSS_HASH_TYPE_TCP_IPV6:
4788 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6);
4789 break;
4790
4791 default:
4792 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE);
4793 break;
4794 }
4795
4796 #else
4797 mp->m_flags |= M_FLOWID;
4798 #endif
4799
4800 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) {
4801 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
4802 }
4803
4804 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) {
4805 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
4806 }
4807
4808 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) {
4809 mp->m_pkthdr.csum_data = 0xFFFF;
4810 mp->m_pkthdr.csum_flags |=
4811 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
4812 }
4813
4814 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) {
4815 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag);
4816 mp->m_flags |= M_VLANTAG;
4817 }
4818
4819 QLNX_INC_IPACKETS(ifp);
4820 QLNX_INC_IBYTES(ifp, len);
4821
4822 #ifdef QLNX_SOFT_LRO
4823
4824 if (lro_enable) {
4825 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO)
4826
4827 tcp_lro_queue_mbuf(lro, mp);
4828
4829 #else
4830
4831 if (tcp_lro_rx(lro, mp, 0))
4832 (*ifp->if_input)(ifp, mp);
4833
4834 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */
4835
4836 } else {
4837 (*ifp->if_input)(ifp, mp);
4838 }
4839 #else
4840
4841 (*ifp->if_input)(ifp, mp);
4842
4843 #endif /* #ifdef QLNX_SOFT_LRO */
4844
4845 rx_pkt++;
4846
4847 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1);
4848
4849 next_cqe: /* don't consume bd rx buffer */
4850
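/* Recycle the completion-ring element that was just consumed and
 * re-read the consumer index before re-evaluating the loop condition. */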
ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4851 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4852 4853 /* CR TPA - revisit how to handle budget in TPA perhaps 4854 increase on "end" */ 4855 if (rx_pkt == budget) 4856 break; 4857 } /* repeat while sw_comp_cons != hw_comp_cons... */ 4858 4859 /* Update producers */ 4860 qlnx_update_rx_prod(p_hwfn, rxq); 4861 4862 return rx_pkt; 4863 } 4864 4865 /* 4866 * fast path interrupt 4867 */ 4868 4869 static void 4870 qlnx_fp_isr(void *arg) 4871 { 4872 qlnx_ivec_t *ivec = arg; 4873 qlnx_host_t *ha; 4874 struct qlnx_fastpath *fp = NULL; 4875 int idx; 4876 4877 ha = ivec->ha; 4878 4879 if (ha->state != QLNX_STATE_OPEN) { 4880 return; 4881 } 4882 4883 idx = ivec->rss_idx; 4884 4885 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4886 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4887 ha->err_illegal_intr++; 4888 return; 4889 } 4890 fp = &ha->fp_array[idx]; 4891 4892 if (fp == NULL) { 4893 ha->err_fp_null++; 4894 } else { 4895 int rx_int = 0, total_rx_count = 0; 4896 int lro_enable, tc; 4897 struct qlnx_tx_queue *txq; 4898 uint16_t elem_left; 4899 4900 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 4901 4902 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4903 4904 do { 4905 for (tc = 0; tc < ha->num_tc; tc++) { 4906 txq = fp->txq[tc]; 4907 4908 if((int)(elem_left = 4909 ecore_chain_get_elem_left(&txq->tx_pbl)) < 4910 QLNX_TX_ELEM_THRESH) { 4911 if (mtx_trylock(&fp->tx_mtx)) { 4912 #ifdef QLNX_TRACE_PERF_DATA 4913 tx_compl = fp->tx_pkts_completed; 4914 #endif 4915 4916 qlnx_tx_int(ha, fp, fp->txq[tc]); 4917 #ifdef QLNX_TRACE_PERF_DATA 4918 fp->tx_pkts_compl_intr += 4919 (fp->tx_pkts_completed - tx_compl); 4920 if ((fp->tx_pkts_completed - tx_compl) <= 32) 4921 fp->tx_comInt[0]++; 4922 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 4923 ((fp->tx_pkts_completed - tx_compl) <= 64)) 4924 fp->tx_comInt[1]++; 4925 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 4926 ((fp->tx_pkts_completed - tx_compl) <= 128)) 4927 fp->tx_comInt[2]++; 4928 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 4929 fp->tx_comInt[3]++; 4930 #endif 4931 mtx_unlock(&fp->tx_mtx); 4932 } 4933 } 4934 } 4935 4936 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4937 lro_enable); 4938 4939 if (rx_int) { 4940 fp->rx_pkts += rx_int; 4941 total_rx_count += rx_int; 4942 } 4943 4944 } while (rx_int); 4945 4946 #ifdef QLNX_SOFT_LRO 4947 { 4948 struct lro_ctrl *lro; 4949 4950 lro = &fp->rxq->lro; 4951 4952 if (lro_enable && total_rx_count) { 4953 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 4954 4955 #ifdef QLNX_TRACE_LRO_CNT 4956 if (lro->lro_mbuf_count & ~1023) 4957 fp->lro_cnt_1024++; 4958 else if (lro->lro_mbuf_count & ~511) 4959 fp->lro_cnt_512++; 4960 else if (lro->lro_mbuf_count & ~255) 4961 fp->lro_cnt_256++; 4962 else if (lro->lro_mbuf_count & ~127) 4963 fp->lro_cnt_128++; 4964 else if (lro->lro_mbuf_count & ~63) 4965 fp->lro_cnt_64++; 4966 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4967 4968 tcp_lro_flush_all(lro); 4969 4970 #else 4971 struct lro_entry *queued; 4972 4973 while ((!SLIST_EMPTY(&lro->lro_active))) { 4974 queued = SLIST_FIRST(&lro->lro_active); 4975 SLIST_REMOVE_HEAD(&lro->lro_active, \ 4976 next); 4977 tcp_lro_flush(lro, queued); 4978 } 4979 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 4980 } 4981 } 4982 #endif /* #ifdef QLNX_SOFT_LRO */ 4983 4984 ecore_sb_update_sb_idx(fp->sb_info); 4985 rmb(); 4986 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4987 } 4988 4989 return; 4990 } 4991 4992 /* 
4993 * slow path interrupt processing function 4994 * can be invoked in polled mode or in interrupt mode via taskqueue. 4995 */ 4996 void 4997 qlnx_sp_isr(void *arg) 4998 { 4999 struct ecore_hwfn *p_hwfn; 5000 qlnx_host_t *ha; 5001 5002 p_hwfn = arg; 5003 5004 ha = (qlnx_host_t *)p_hwfn->p_dev; 5005 5006 ha->sp_interrupts++; 5007 5008 QL_DPRINT2(ha, "enter\n"); 5009 5010 ecore_int_sp_dpc(p_hwfn); 5011 5012 QL_DPRINT2(ha, "exit\n"); 5013 5014 return; 5015 } 5016 5017 /***************************************************************************** 5018 * Support Functions for DMA'able Memory 5019 *****************************************************************************/ 5020 5021 static void 5022 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 5023 { 5024 *((bus_addr_t *)arg) = 0; 5025 5026 if (error) { 5027 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 5028 return; 5029 } 5030 5031 *((bus_addr_t *)arg) = segs[0].ds_addr; 5032 5033 return; 5034 } 5035 5036 static int 5037 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5038 { 5039 int ret = 0; 5040 device_t dev; 5041 bus_addr_t b_addr; 5042 5043 dev = ha->pci_dev; 5044 5045 ret = bus_dma_tag_create( 5046 ha->parent_tag,/* parent */ 5047 dma_buf->alignment, 5048 ((bus_size_t)(1ULL << 32)),/* boundary */ 5049 BUS_SPACE_MAXADDR, /* lowaddr */ 5050 BUS_SPACE_MAXADDR, /* highaddr */ 5051 NULL, NULL, /* filter, filterarg */ 5052 dma_buf->size, /* maxsize */ 5053 1, /* nsegments */ 5054 dma_buf->size, /* maxsegsize */ 5055 0, /* flags */ 5056 NULL, NULL, /* lockfunc, lockarg */ 5057 &dma_buf->dma_tag); 5058 5059 if (ret) { 5060 QL_DPRINT1(ha, "could not create dma tag\n"); 5061 goto qlnx_alloc_dmabuf_exit; 5062 } 5063 ret = bus_dmamem_alloc(dma_buf->dma_tag, 5064 (void **)&dma_buf->dma_b, 5065 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 5066 &dma_buf->dma_map); 5067 if (ret) { 5068 bus_dma_tag_destroy(dma_buf->dma_tag); 5069 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 5070 goto qlnx_alloc_dmabuf_exit; 5071 } 5072 5073 ret = bus_dmamap_load(dma_buf->dma_tag, 5074 dma_buf->dma_map, 5075 dma_buf->dma_b, 5076 dma_buf->size, 5077 qlnx_dmamap_callback, 5078 &b_addr, BUS_DMA_NOWAIT); 5079 5080 if (ret || !b_addr) { 5081 bus_dma_tag_destroy(dma_buf->dma_tag); 5082 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 5083 dma_buf->dma_map); 5084 ret = -1; 5085 goto qlnx_alloc_dmabuf_exit; 5086 } 5087 5088 dma_buf->dma_addr = b_addr; 5089 5090 qlnx_alloc_dmabuf_exit: 5091 5092 return ret; 5093 } 5094 5095 static void 5096 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5097 { 5098 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5099 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5100 bus_dma_tag_destroy(dma_buf->dma_tag); 5101 return; 5102 } 5103 5104 void * 5105 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5106 { 5107 qlnx_dma_t dma_buf; 5108 qlnx_dma_t *dma_p; 5109 qlnx_host_t *ha; 5110 device_t dev; 5111 5112 ha = (qlnx_host_t *)ecore_dev; 5113 dev = ha->pci_dev; 5114 5115 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5116 5117 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5118 5119 dma_buf.size = size + PAGE_SIZE; 5120 dma_buf.alignment = 8; 5121 5122 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5123 return (NULL); 5124 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5125 5126 *phys = dma_buf.dma_addr; 5127 5128 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 5129 5130 memcpy(dma_p, &dma_buf, 
sizeof(qlnx_dma_t)); 5131 5132 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5133 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5134 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5135 5136 return (dma_buf.dma_b); 5137 } 5138 5139 void 5140 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5141 uint32_t size) 5142 { 5143 qlnx_dma_t dma_buf, *dma_p; 5144 qlnx_host_t *ha; 5145 device_t dev; 5146 5147 ha = (qlnx_host_t *)ecore_dev; 5148 dev = ha->pci_dev; 5149 5150 if (v_addr == NULL) 5151 return; 5152 5153 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5154 5155 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5156 5157 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5158 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5159 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5160 5161 dma_buf = *dma_p; 5162 5163 if (!ha->qlnxr_debug) 5164 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5165 return; 5166 } 5167 5168 static int 5169 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5170 { 5171 int ret; 5172 device_t dev; 5173 5174 dev = ha->pci_dev; 5175 5176 /* 5177 * Allocate parent DMA Tag 5178 */ 5179 ret = bus_dma_tag_create( 5180 bus_get_dma_tag(dev), /* parent */ 5181 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5182 BUS_SPACE_MAXADDR, /* lowaddr */ 5183 BUS_SPACE_MAXADDR, /* highaddr */ 5184 NULL, NULL, /* filter, filterarg */ 5185 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5186 0, /* nsegments */ 5187 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5188 0, /* flags */ 5189 NULL, NULL, /* lockfunc, lockarg */ 5190 &ha->parent_tag); 5191 5192 if (ret) { 5193 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5194 return (-1); 5195 } 5196 5197 ha->flags.parent_tag = 1; 5198 5199 return (0); 5200 } 5201 5202 static void 5203 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5204 { 5205 if (ha->parent_tag != NULL) { 5206 bus_dma_tag_destroy(ha->parent_tag); 5207 ha->parent_tag = NULL; 5208 } 5209 return; 5210 } 5211 5212 static int 5213 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5214 { 5215 if (bus_dma_tag_create(NULL, /* parent */ 5216 1, 0, /* alignment, bounds */ 5217 BUS_SPACE_MAXADDR, /* lowaddr */ 5218 BUS_SPACE_MAXADDR, /* highaddr */ 5219 NULL, NULL, /* filter, filterarg */ 5220 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5221 QLNX_MAX_SEGMENTS, /* nsegments */ 5222 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5223 0, /* flags */ 5224 NULL, /* lockfunc */ 5225 NULL, /* lockfuncarg */ 5226 &ha->tx_tag)) { 5227 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5228 return (-1); 5229 } 5230 5231 return (0); 5232 } 5233 5234 static void 5235 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5236 { 5237 if (ha->tx_tag != NULL) { 5238 bus_dma_tag_destroy(ha->tx_tag); 5239 ha->tx_tag = NULL; 5240 } 5241 return; 5242 } 5243 5244 static int 5245 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5246 { 5247 if (bus_dma_tag_create(NULL, /* parent */ 5248 1, 0, /* alignment, bounds */ 5249 BUS_SPACE_MAXADDR, /* lowaddr */ 5250 BUS_SPACE_MAXADDR, /* highaddr */ 5251 NULL, NULL, /* filter, filterarg */ 5252 MJUM9BYTES, /* maxsize */ 5253 1, /* nsegments */ 5254 MJUM9BYTES, /* maxsegsize */ 5255 0, /* flags */ 5256 NULL, /* lockfunc */ 5257 NULL, /* lockfuncarg */ 5258 &ha->rx_tag)) { 5259 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5260 5261 return (-1); 5262 } 5263 return (0); 5264 } 5265 5266 static void 5267 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5268 { 5269 if (ha->rx_tag != NULL) { 5270 bus_dma_tag_destroy(ha->rx_tag); 5271 ha->rx_tag = NULL; 5272 } 5273 return; 5274 } 5275 5276 /********************************* 5277 * Exported 
functions 5278 *********************************/ 5279 uint32_t 5280 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5281 { 5282 uint32_t bar_size; 5283 5284 bar_id = bar_id * 2; 5285 5286 bar_size = bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5287 SYS_RES_MEMORY, 5288 PCIR_BAR(bar_id)); 5289 5290 return (bar_size); 5291 } 5292 5293 uint32_t 5294 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5295 { 5296 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5297 pci_reg, 1); 5298 return 0; 5299 } 5300 5301 uint32_t 5302 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5303 uint16_t *reg_value) 5304 { 5305 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5306 pci_reg, 2); 5307 return 0; 5308 } 5309 5310 uint32_t 5311 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5312 uint32_t *reg_value) 5313 { 5314 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5315 pci_reg, 4); 5316 return 0; 5317 } 5318 5319 void 5320 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5321 { 5322 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5323 pci_reg, reg_value, 1); 5324 return; 5325 } 5326 5327 void 5328 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5329 uint16_t reg_value) 5330 { 5331 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5332 pci_reg, reg_value, 2); 5333 return; 5334 } 5335 5336 void 5337 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5338 uint32_t reg_value) 5339 { 5340 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5341 pci_reg, reg_value, 4); 5342 return; 5343 } 5344 5345 int 5346 qlnx_pci_find_capability(void *ecore_dev, int cap) 5347 { 5348 int reg; 5349 qlnx_host_t *ha; 5350 5351 ha = ecore_dev; 5352 5353 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0) 5354 return reg; 5355 else { 5356 QL_DPRINT1(ha, "failed\n"); 5357 return 0; 5358 } 5359 } 5360 5361 int 5362 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5363 { 5364 int reg; 5365 qlnx_host_t *ha; 5366 5367 ha = ecore_dev; 5368 5369 if (pci_find_extcap(ha->pci_dev, ext_cap, ®) == 0) 5370 return reg; 5371 else { 5372 QL_DPRINT1(ha, "failed\n"); 5373 return 0; 5374 } 5375 } 5376 5377 uint32_t 5378 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5379 { 5380 uint32_t data32; 5381 struct ecore_hwfn *p_hwfn; 5382 5383 p_hwfn = hwfn; 5384 5385 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5386 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5387 5388 return (data32); 5389 } 5390 5391 void 5392 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5393 { 5394 struct ecore_hwfn *p_hwfn = hwfn; 5395 5396 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5397 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5398 5399 return; 5400 } 5401 5402 void 5403 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5404 { 5405 struct ecore_hwfn *p_hwfn = hwfn; 5406 5407 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5408 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5409 return; 5410 } 5411 5412 void 5413 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5414 { 5415 struct ecore_dev *cdev; 5416 struct ecore_hwfn *p_hwfn; 5417 uint32_t offset; 5418 5419 p_hwfn = hwfn; 5420 5421 cdev = p_hwfn->p_dev; 5422 5423 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5424 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value); 5425 5426 return; 5427 } 
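/* Offset-based variant of qlnx_dbell_wr32_db() above: the caller passes an
 * offset relative to the hwfn's doorbell window instead of a mapped
 * doorbell address. */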
5428 5429 void 5430 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5431 { 5432 struct ecore_hwfn *p_hwfn = hwfn; 5433 5434 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5435 (bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5436 5437 return; 5438 } 5439 5440 uint32_t 5441 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5442 { 5443 uint32_t data32; 5444 bus_size_t offset; 5445 struct ecore_dev *cdev; 5446 5447 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5448 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5449 5450 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5451 5452 return (data32); 5453 } 5454 5455 void 5456 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5457 { 5458 bus_size_t offset; 5459 struct ecore_dev *cdev; 5460 5461 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5462 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5463 5464 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5465 5466 return; 5467 } 5468 5469 void 5470 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5471 { 5472 bus_size_t offset; 5473 struct ecore_dev *cdev; 5474 5475 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5476 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5477 5478 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5479 return; 5480 } 5481 5482 void * 5483 qlnx_zalloc(uint32_t size) 5484 { 5485 caddr_t va; 5486 5487 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5488 bzero(va, size); 5489 return ((void *)va); 5490 } 5491 5492 void 5493 qlnx_barrier(void *p_hwfn) 5494 { 5495 qlnx_host_t *ha; 5496 5497 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5498 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5499 } 5500 5501 void 5502 qlnx_link_update(void *p_hwfn) 5503 { 5504 qlnx_host_t *ha; 5505 int prev_link_state; 5506 5507 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5508 5509 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5510 5511 prev_link_state = ha->link_up; 5512 ha->link_up = ha->if_link.link_up; 5513 5514 if (prev_link_state != ha->link_up) { 5515 if (ha->link_up) { 5516 if_link_state_change(ha->ifp, LINK_STATE_UP); 5517 } else { 5518 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5519 } 5520 } 5521 #ifndef QLNX_VF 5522 #ifdef CONFIG_ECORE_SRIOV 5523 5524 if (qlnx_vf_device(ha) != 0) { 5525 if (ha->sriov_initialized) 5526 qlnx_inform_vf_link_state(p_hwfn, ha); 5527 } 5528 5529 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5530 #endif /* #ifdef QLNX_VF */ 5531 5532 return; 5533 } 5534 5535 static void 5536 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5537 struct ecore_vf_acquire_sw_info *p_sw_info) 5538 { 5539 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5540 (QLNX_VERSION_MINOR << 16) | 5541 QLNX_VERSION_BUILD; 5542 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5543 5544 return; 5545 } 5546 5547 void 5548 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5549 void *p_sw_info) 5550 { 5551 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5552 5553 return; 5554 } 5555 5556 void 5557 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5558 struct qlnx_link_output *if_link) 5559 { 5560 struct ecore_mcp_link_params link_params; 5561 struct ecore_mcp_link_state link_state; 5562 uint8_t p_change; 5563 struct ecore_ptt *p_ptt = NULL; 5564 5565 memset(if_link, 0, sizeof(*if_link)); 5566 memset(&link_params, 0, sizeof(struct 
ecore_mcp_link_params)); 5567 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5568 5569 ha = (qlnx_host_t *)hwfn->p_dev; 5570 5571 /* Prepare source inputs */ 5572 /* we only deal with physical functions */ 5573 if (qlnx_vf_device(ha) != 0) { 5574 p_ptt = ecore_ptt_acquire(hwfn); 5575 5576 if (p_ptt == NULL) { 5577 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5578 return; 5579 } 5580 5581 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5582 ecore_ptt_release(hwfn, p_ptt); 5583 5584 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5585 sizeof(link_params)); 5586 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5587 sizeof(link_state)); 5588 } else { 5589 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5590 ecore_vf_read_bulletin(hwfn, &p_change); 5591 ecore_vf_get_link_params(hwfn, &link_params); 5592 ecore_vf_get_link_state(hwfn, &link_state); 5593 } 5594 5595 /* Set the link parameters to pass to protocol driver */ 5596 if (link_state.link_up) { 5597 if_link->link_up = true; 5598 if_link->speed = link_state.speed; 5599 } 5600 5601 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5602 5603 if (link_params.speed.autoneg) 5604 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5605 5606 if (link_params.pause.autoneg || 5607 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5608 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5609 5610 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5611 link_params.pause.forced_tx) 5612 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5613 5614 if (link_params.speed.advertised_speeds & 5615 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5616 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5617 QLNX_LINK_CAP_1000baseT_Full; 5618 5619 if (link_params.speed.advertised_speeds & 5620 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5621 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5622 5623 if (link_params.speed.advertised_speeds & 5624 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5625 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5626 5627 if (link_params.speed.advertised_speeds & 5628 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5629 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5630 5631 if (link_params.speed.advertised_speeds & 5632 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5633 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5634 5635 if (link_params.speed.advertised_speeds & 5636 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5637 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5638 5639 if_link->advertised_caps = if_link->supported_caps; 5640 5641 if_link->autoneg = link_params.speed.autoneg; 5642 if_link->duplex = QLNX_LINK_DUPLEX; 5643 5644 /* Link partner capabilities */ 5645 5646 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5647 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5648 5649 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5650 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5651 5652 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5653 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5654 5655 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5656 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5657 5658 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5659 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5660 5661 if (link_state.partner_adv_speed & 
ECORE_LINK_PARTNER_SPEED_50G) 5662 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5663 5664 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5665 if_link->link_partner_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5666 5667 if (link_state.an_complete) 5668 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5669 5670 if (link_state.partner_adv_pause) 5671 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5672 5673 if ((link_state.partner_adv_pause == 5674 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5675 (link_state.partner_adv_pause == 5676 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5677 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5678 5679 return; 5680 } 5681 5682 void 5683 qlnx_schedule_recovery(void *p_hwfn) 5684 { 5685 qlnx_host_t *ha; 5686 5687 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5688 5689 if (qlnx_vf_device(ha) != 0) { 5690 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5691 } 5692 5693 return; 5694 } 5695 5696 static int 5697 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5698 { 5699 int rc, i; 5700 5701 for (i = 0; i < cdev->num_hwfns; i++) { 5702 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5703 p_hwfn->pf_params = *func_params; 5704 5705 #ifdef QLNX_ENABLE_IWARP 5706 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5707 p_hwfn->using_ll2 = true; 5708 } 5709 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5710 } 5711 5712 rc = ecore_resc_alloc(cdev); 5713 if (rc) 5714 goto qlnx_nic_setup_exit; 5715 5716 ecore_resc_setup(cdev); 5717 5718 qlnx_nic_setup_exit: 5719 5720 return rc; 5721 } 5722 5723 static int 5724 qlnx_nic_start(struct ecore_dev *cdev) 5725 { 5726 int rc; 5727 struct ecore_hw_init_params params; 5728 5729 bzero(¶ms, sizeof (struct ecore_hw_init_params)); 5730 5731 params.p_tunn = NULL; 5732 params.b_hw_start = true; 5733 params.int_mode = cdev->int_mode; 5734 params.allow_npar_tx_switch = true; 5735 params.bin_fw_data = NULL; 5736 5737 rc = ecore_hw_init(cdev, ¶ms); 5738 if (rc) { 5739 ecore_resc_free(cdev); 5740 return rc; 5741 } 5742 5743 return 0; 5744 } 5745 5746 static int 5747 qlnx_slowpath_start(qlnx_host_t *ha) 5748 { 5749 struct ecore_dev *cdev; 5750 struct ecore_pf_params pf_params; 5751 int rc; 5752 5753 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5754 pf_params.eth_pf_params.num_cons = 5755 (ha->num_rss) * (ha->num_tc + 1); 5756 5757 #ifdef QLNX_ENABLE_IWARP 5758 if (qlnx_vf_device(ha) != 0) { 5759 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5760 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5761 pf_params.rdma_pf_params.num_qps = 1024; 5762 pf_params.rdma_pf_params.num_srqs = 1024; 5763 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5764 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5765 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5766 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5767 pf_params.rdma_pf_params.num_qps = 8192; 5768 pf_params.rdma_pf_params.num_srqs = 8192; 5769 //pf_params.rdma_pf_params.min_dpis = 0; 5770 pf_params.rdma_pf_params.min_dpis = 8; 5771 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5772 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5773 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5774 } 5775 } 5776 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5777 5778 cdev = &ha->cdev; 5779 5780 rc = qlnx_nic_setup(cdev, &pf_params); 5781 if (rc) 5782 goto qlnx_slowpath_start_exit; 5783 5784 cdev->int_mode = ECORE_INT_MODE_MSIX; 5785 
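/* Interrupt coalescing is enabled by default; the rx/tx usec values are
 * only forced to 255 when the driver is built with QLNX_MAX_COALESCE. */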
cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5786 5787 #ifdef QLNX_MAX_COALESCE 5788 cdev->rx_coalesce_usecs = 255; 5789 cdev->tx_coalesce_usecs = 255; 5790 #endif 5791 5792 rc = qlnx_nic_start(cdev); 5793 5794 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5795 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5796 5797 #ifdef QLNX_USER_LLDP 5798 (void)qlnx_set_lldp_tlvx(ha, NULL); 5799 #endif /* #ifdef QLNX_USER_LLDP */ 5800 5801 qlnx_slowpath_start_exit: 5802 5803 return (rc); 5804 } 5805 5806 static int 5807 qlnx_slowpath_stop(qlnx_host_t *ha) 5808 { 5809 struct ecore_dev *cdev; 5810 device_t dev = ha->pci_dev; 5811 int i; 5812 5813 cdev = &ha->cdev; 5814 5815 ecore_hw_stop(cdev); 5816 5817 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5818 if (ha->sp_handle[i]) 5819 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5820 ha->sp_handle[i]); 5821 5822 ha->sp_handle[i] = NULL; 5823 5824 if (ha->sp_irq[i]) 5825 (void) bus_release_resource(dev, SYS_RES_IRQ, 5826 ha->sp_irq_rid[i], ha->sp_irq[i]); 5827 ha->sp_irq[i] = NULL; 5828 } 5829 5830 ecore_resc_free(cdev); 5831 5832 return 0; 5833 } 5834 5835 static void 5836 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5837 char ver_str[VER_SIZE]) 5838 { 5839 int i; 5840 5841 memcpy(cdev->name, name, NAME_SIZE); 5842 5843 for_each_hwfn(cdev, i) { 5844 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5845 } 5846 5847 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5848 5849 return ; 5850 } 5851 5852 void 5853 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5854 { 5855 enum ecore_mcp_protocol_type type; 5856 union ecore_mcp_protocol_stats *stats; 5857 struct ecore_eth_stats eth_stats; 5858 qlnx_host_t *ha; 5859 5860 ha = cdev; 5861 stats = proto_stats; 5862 type = proto_type; 5863 5864 switch (type) { 5865 case ECORE_MCP_LAN_STATS: 5866 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats); 5867 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5868 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5869 stats->lan_stats.fcs_err = -1; 5870 break; 5871 5872 default: 5873 ha->err_get_proto_invalid_type++; 5874 5875 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5876 break; 5877 } 5878 return; 5879 } 5880 5881 static int 5882 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5883 { 5884 struct ecore_hwfn *p_hwfn; 5885 struct ecore_ptt *p_ptt; 5886 5887 p_hwfn = &ha->cdev.hwfns[0]; 5888 p_ptt = ecore_ptt_acquire(p_hwfn); 5889 5890 if (p_ptt == NULL) { 5891 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5892 return (-1); 5893 } 5894 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5895 5896 ecore_ptt_release(p_hwfn, p_ptt); 5897 5898 return (0); 5899 } 5900 5901 static int 5902 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5903 { 5904 struct ecore_hwfn *p_hwfn; 5905 struct ecore_ptt *p_ptt; 5906 5907 p_hwfn = &ha->cdev.hwfns[0]; 5908 p_ptt = ecore_ptt_acquire(p_hwfn); 5909 5910 if (p_ptt == NULL) { 5911 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5912 return (-1); 5913 } 5914 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5915 5916 ecore_ptt_release(p_hwfn, p_ptt); 5917 5918 return (0); 5919 } 5920 5921 static int 5922 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5923 { 5924 struct ecore_dev *cdev; 5925 5926 cdev = &ha->cdev; 5927 5928 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5929 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5930 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * 
QLNX_MAX_RSS)); 5931 5932 return 0; 5933 } 5934 5935 static void 5936 qlnx_init_fp(qlnx_host_t *ha) 5937 { 5938 int rss_id, txq_array_index, tc; 5939 5940 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5941 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5942 5943 fp->rss_id = rss_id; 5944 fp->edev = ha; 5945 fp->sb_info = &ha->sb_array[rss_id]; 5946 fp->rxq = &ha->rxq_array[rss_id]; 5947 fp->rxq->rxq_id = rss_id; 5948 5949 for (tc = 0; tc < ha->num_tc; tc++) { 5950 txq_array_index = tc * ha->num_rss + rss_id; 5951 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5952 fp->txq[tc]->index = txq_array_index; 5953 } 5954 5955 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5956 rss_id); 5957 5958 fp->tx_ring_full = 0; 5959 5960 /* reset all the statistics counters */ 5961 5962 fp->tx_pkts_processed = 0; 5963 fp->tx_pkts_freed = 0; 5964 fp->tx_pkts_transmitted = 0; 5965 fp->tx_pkts_completed = 0; 5966 5967 #ifdef QLNX_TRACE_PERF_DATA 5968 fp->tx_pkts_trans_ctx = 0; 5969 fp->tx_pkts_compl_ctx = 0; 5970 fp->tx_pkts_trans_fp = 0; 5971 fp->tx_pkts_compl_fp = 0; 5972 fp->tx_pkts_compl_intr = 0; 5973 #endif 5974 fp->tx_lso_wnd_min_len = 0; 5975 fp->tx_defrag = 0; 5976 fp->tx_nsegs_gt_elem_left = 0; 5977 fp->tx_tso_max_nsegs = 0; 5978 fp->tx_tso_min_nsegs = 0; 5979 fp->err_tx_nsegs_gt_elem_left = 0; 5980 fp->err_tx_dmamap_create = 0; 5981 fp->err_tx_defrag_dmamap_load = 0; 5982 fp->err_tx_non_tso_max_seg = 0; 5983 fp->err_tx_dmamap_load = 0; 5984 fp->err_tx_defrag = 0; 5985 fp->err_tx_free_pkt_null = 0; 5986 fp->err_tx_cons_idx_conflict = 0; 5987 5988 fp->rx_pkts = 0; 5989 fp->err_m_getcl = 0; 5990 fp->err_m_getjcl = 0; 5991 } 5992 return; 5993 } 5994 5995 void 5996 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5997 { 5998 struct ecore_dev *cdev; 5999 6000 cdev = &ha->cdev; 6001 6002 if (sb_info->sb_virt) { 6003 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 6004 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 6005 sb_info->sb_virt = NULL; 6006 } 6007 } 6008 6009 static int 6010 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 6011 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 6012 { 6013 struct ecore_hwfn *p_hwfn; 6014 int hwfn_index, rc; 6015 u16 rel_sb_id; 6016 6017 hwfn_index = sb_id % cdev->num_hwfns; 6018 p_hwfn = &cdev->hwfns[hwfn_index]; 6019 rel_sb_id = sb_id / cdev->num_hwfns; 6020 6021 QL_DPRINT2(((qlnx_host_t *)cdev), 6022 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 6023 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 6024 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 6025 sb_virt_addr, (void *)sb_phy_addr); 6026 6027 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 6028 sb_virt_addr, sb_phy_addr, rel_sb_id); 6029 6030 return rc; 6031 } 6032 6033 /* This function allocates fast-path status block memory */ 6034 int 6035 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 6036 { 6037 struct status_block_e4 *sb_virt; 6038 bus_addr_t sb_phys; 6039 int rc; 6040 uint32_t size; 6041 struct ecore_dev *cdev; 6042 6043 cdev = &ha->cdev; 6044 6045 size = sizeof(*sb_virt); 6046 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 6047 6048 if (!sb_virt) { 6049 QL_DPRINT1(ha, "Status block allocation failed\n"); 6050 return -ENOMEM; 6051 } 6052 6053 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 6054 if (rc) { 6055 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 6056 } 6057 6058 return rc; 6059 } 6060 6061 static void 6062 
qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6063 { 6064 int i; 6065 struct sw_rx_data *rx_buf; 6066 6067 for (i = 0; i < rxq->num_rx_buffers; i++) { 6068 rx_buf = &rxq->sw_rx_ring[i]; 6069 6070 if (rx_buf->data != NULL) { 6071 if (rx_buf->map != NULL) { 6072 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6073 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6074 rx_buf->map = NULL; 6075 } 6076 m_freem(rx_buf->data); 6077 rx_buf->data = NULL; 6078 } 6079 } 6080 return; 6081 } 6082 6083 static void 6084 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6085 { 6086 struct ecore_dev *cdev; 6087 int i; 6088 6089 cdev = &ha->cdev; 6090 6091 qlnx_free_rx_buffers(ha, rxq); 6092 6093 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6094 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 6095 if (rxq->tpa_info[i].mpf != NULL) 6096 m_freem(rxq->tpa_info[i].mpf); 6097 } 6098 6099 bzero((void *)&rxq->sw_rx_ring[0], 6100 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6101 6102 /* Free the real RQ ring used by FW */ 6103 if (rxq->rx_bd_ring.p_virt_addr) { 6104 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6105 rxq->rx_bd_ring.p_virt_addr = NULL; 6106 } 6107 6108 /* Free the real completion ring used by FW */ 6109 if (rxq->rx_comp_ring.p_virt_addr && 6110 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6111 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6112 rxq->rx_comp_ring.p_virt_addr = NULL; 6113 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6114 } 6115 6116 #ifdef QLNX_SOFT_LRO 6117 { 6118 struct lro_ctrl *lro; 6119 6120 lro = &rxq->lro; 6121 tcp_lro_free(lro); 6122 } 6123 #endif /* #ifdef QLNX_SOFT_LRO */ 6124 6125 return; 6126 } 6127 6128 static int 6129 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6130 { 6131 register struct mbuf *mp; 6132 uint16_t rx_buf_size; 6133 struct sw_rx_data *sw_rx_data; 6134 struct eth_rx_bd *rx_bd; 6135 dma_addr_t dma_addr; 6136 bus_dmamap_t map; 6137 bus_dma_segment_t segs[1]; 6138 int nsegs; 6139 int ret; 6140 struct ecore_dev *cdev; 6141 6142 cdev = &ha->cdev; 6143 6144 rx_buf_size = rxq->rx_buf_size; 6145 6146 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6147 6148 if (mp == NULL) { 6149 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6150 return -ENOMEM; 6151 } 6152 6153 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6154 6155 map = (bus_dmamap_t)0; 6156 6157 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6158 BUS_DMA_NOWAIT); 6159 dma_addr = segs[0].ds_addr; 6160 6161 if (ret || !dma_addr || (nsegs != 1)) { 6162 m_freem(mp); 6163 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6164 ret, (long long unsigned int)dma_addr, nsegs); 6165 return -ENOMEM; 6166 } 6167 6168 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6169 sw_rx_data->data = mp; 6170 sw_rx_data->dma_addr = dma_addr; 6171 sw_rx_data->map = map; 6172 6173 /* Advance PROD and get BD pointer */ 6174 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6175 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6176 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6177 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6178 6179 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6180 6181 return 0; 6182 } 6183 6184 static int 6185 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6186 struct qlnx_agg_info *tpa) 6187 { 6188 struct mbuf *mp; 6189 dma_addr_t dma_addr; 6190 bus_dmamap_t map; 6191 bus_dma_segment_t segs[1]; 6192 int nsegs; 6193 int ret; 6194 struct sw_rx_data *rx_buf; 6195 6196 mp = m_getjcl(M_NOWAIT, MT_DATA, 
M_PKTHDR, rx_buf_size); 6197 6198 if (mp == NULL) { 6199 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6200 return -ENOMEM; 6201 } 6202 6203 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6204 6205 map = (bus_dmamap_t)0; 6206 6207 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6208 BUS_DMA_NOWAIT); 6209 dma_addr = segs[0].ds_addr; 6210 6211 if (ret || !dma_addr || (nsegs != 1)) { 6212 m_freem(mp); 6213 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6214 ret, (long long unsigned int)dma_addr, nsegs); 6215 return -ENOMEM; 6216 } 6217 6218 rx_buf = &tpa->rx_buf; 6219 6220 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6221 6222 rx_buf->data = mp; 6223 rx_buf->dma_addr = dma_addr; 6224 rx_buf->map = map; 6225 6226 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6227 6228 return (0); 6229 } 6230 6231 static void 6232 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6233 { 6234 struct sw_rx_data *rx_buf; 6235 6236 rx_buf = &tpa->rx_buf; 6237 6238 if (rx_buf->data != NULL) { 6239 if (rx_buf->map != NULL) { 6240 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6241 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6242 rx_buf->map = NULL; 6243 } 6244 m_freem(rx_buf->data); 6245 rx_buf->data = NULL; 6246 } 6247 return; 6248 } 6249 6250 /* This function allocates all memory needed per Rx queue */ 6251 static int 6252 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6253 { 6254 int i, rc, num_allocated; 6255 struct ifnet *ifp; 6256 struct ecore_dev *cdev; 6257 6258 cdev = &ha->cdev; 6259 ifp = ha->ifp; 6260 6261 rxq->num_rx_buffers = RX_RING_SIZE; 6262 6263 rxq->rx_buf_size = ha->rx_buf_size; 6264 6265 /* Allocate the parallel driver ring for Rx buffers */ 6266 bzero((void *)&rxq->sw_rx_ring[0], 6267 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6268 6269 /* Allocate FW Rx ring */ 6270 6271 rc = ecore_chain_alloc(cdev, 6272 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6273 ECORE_CHAIN_MODE_NEXT_PTR, 6274 ECORE_CHAIN_CNT_TYPE_U16, 6275 RX_RING_SIZE, 6276 sizeof(struct eth_rx_bd), 6277 &rxq->rx_bd_ring, NULL); 6278 6279 if (rc) 6280 goto err; 6281 6282 /* Allocate FW completion ring */ 6283 rc = ecore_chain_alloc(cdev, 6284 ECORE_CHAIN_USE_TO_CONSUME, 6285 ECORE_CHAIN_MODE_PBL, 6286 ECORE_CHAIN_CNT_TYPE_U16, 6287 RX_RING_SIZE, 6288 sizeof(union eth_rx_cqe), 6289 &rxq->rx_comp_ring, NULL); 6290 6291 if (rc) 6292 goto err; 6293 6294 /* Allocate buffers for the Rx ring */ 6295 6296 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6297 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6298 &rxq->tpa_info[i]); 6299 if (rc) 6300 break; 6301 } 6302 6303 for (i = 0; i < rxq->num_rx_buffers; i++) { 6304 rc = qlnx_alloc_rx_buffer(ha, rxq); 6305 if (rc) 6306 break; 6307 } 6308 num_allocated = i; 6309 if (!num_allocated) { 6310 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6311 goto err; 6312 } else if (num_allocated < rxq->num_rx_buffers) { 6313 QL_DPRINT1(ha, "Allocated less buffers than" 6314 " desired (%d allocated)\n", num_allocated); 6315 } 6316 6317 #ifdef QLNX_SOFT_LRO 6318 6319 { 6320 struct lro_ctrl *lro; 6321 6322 lro = &rxq->lro; 6323 6324 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 6325 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6326 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6327 rxq->rxq_id); 6328 goto err; 6329 } 6330 #else 6331 if (tcp_lro_init(lro)) { 6332 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6333 rxq->rxq_id); 6334 goto err; 6335 } 6336 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 
*/ 6337 6338 lro->ifp = ha->ifp; 6339 } 6340 #endif /* #ifdef QLNX_SOFT_LRO */ 6341 return 0; 6342 6343 err: 6344 qlnx_free_mem_rxq(ha, rxq); 6345 return -ENOMEM; 6346 } 6347 6348 static void 6349 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6350 struct qlnx_tx_queue *txq) 6351 { 6352 struct ecore_dev *cdev; 6353 6354 cdev = &ha->cdev; 6355 6356 bzero((void *)&txq->sw_tx_ring[0], 6357 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6358 6359 /* Free the real RQ ring used by FW */ 6360 if (txq->tx_pbl.p_virt_addr) { 6361 ecore_chain_free(cdev, &txq->tx_pbl); 6362 txq->tx_pbl.p_virt_addr = NULL; 6363 } 6364 return; 6365 } 6366 6367 /* This function allocates all memory needed per Tx queue */ 6368 static int 6369 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6370 struct qlnx_tx_queue *txq) 6371 { 6372 int ret = ECORE_SUCCESS; 6373 union eth_tx_bd_types *p_virt; 6374 struct ecore_dev *cdev; 6375 6376 cdev = &ha->cdev; 6377 6378 bzero((void *)&txq->sw_tx_ring[0], 6379 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6380 6381 /* Allocate the real Tx ring to be used by FW */ 6382 ret = ecore_chain_alloc(cdev, 6383 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6384 ECORE_CHAIN_MODE_PBL, 6385 ECORE_CHAIN_CNT_TYPE_U16, 6386 TX_RING_SIZE, 6387 sizeof(*p_virt), 6388 &txq->tx_pbl, NULL); 6389 6390 if (ret != ECORE_SUCCESS) { 6391 goto err; 6392 } 6393 6394 txq->num_tx_buffers = TX_RING_SIZE; 6395 6396 return 0; 6397 6398 err: 6399 qlnx_free_mem_txq(ha, fp, txq); 6400 return -ENOMEM; 6401 } 6402 6403 static void 6404 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6405 { 6406 struct mbuf *mp; 6407 struct ifnet *ifp = ha->ifp; 6408 6409 if (mtx_initialized(&fp->tx_mtx)) { 6410 if (fp->tx_br != NULL) { 6411 mtx_lock(&fp->tx_mtx); 6412 6413 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6414 fp->tx_pkts_freed++; 6415 m_freem(mp); 6416 } 6417 6418 mtx_unlock(&fp->tx_mtx); 6419 6420 buf_ring_free(fp->tx_br, M_DEVBUF); 6421 fp->tx_br = NULL; 6422 } 6423 mtx_destroy(&fp->tx_mtx); 6424 } 6425 return; 6426 } 6427 6428 static void 6429 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6430 { 6431 int tc; 6432 6433 qlnx_free_mem_sb(ha, fp->sb_info); 6434 6435 qlnx_free_mem_rxq(ha, fp->rxq); 6436 6437 for (tc = 0; tc < ha->num_tc; tc++) 6438 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6439 6440 return; 6441 } 6442 6443 static int 6444 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6445 { 6446 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6447 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6448 6449 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6450 6451 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6452 M_NOWAIT, &fp->tx_mtx); 6453 if (fp->tx_br == NULL) { 6454 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6455 ha->dev_unit, fp->rss_id); 6456 return -ENOMEM; 6457 } 6458 return 0; 6459 } 6460 6461 static int 6462 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6463 { 6464 int rc, tc; 6465 6466 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6467 if (rc) 6468 goto err; 6469 6470 if (ha->rx_jumbo_buf_eq_mtu) { 6471 if (ha->max_frame_size <= MCLBYTES) 6472 ha->rx_buf_size = MCLBYTES; 6473 else if (ha->max_frame_size <= MJUMPAGESIZE) 6474 ha->rx_buf_size = MJUMPAGESIZE; 6475 else if (ha->max_frame_size <= MJUM9BYTES) 6476 ha->rx_buf_size = MJUM9BYTES; 6477 else if (ha->max_frame_size <= MJUM16BYTES) 6478 ha->rx_buf_size = MJUM16BYTES; 6479 } else { 6480 if (ha->max_frame_size <= MCLBYTES) 6481 ha->rx_buf_size = 
MCLBYTES; 6482 else 6483 ha->rx_buf_size = MJUMPAGESIZE; 6484 } 6485 6486 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6487 if (rc) 6488 goto err; 6489 6490 for (tc = 0; tc < ha->num_tc; tc++) { 6491 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6492 if (rc) 6493 goto err; 6494 } 6495 6496 return 0; 6497 6498 err: 6499 qlnx_free_mem_fp(ha, fp); 6500 return -ENOMEM; 6501 } 6502 6503 static void 6504 qlnx_free_mem_load(qlnx_host_t *ha) 6505 { 6506 int i; 6507 struct ecore_dev *cdev; 6508 6509 cdev = &ha->cdev; 6510 6511 for (i = 0; i < ha->num_rss; i++) { 6512 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6513 6514 qlnx_free_mem_fp(ha, fp); 6515 } 6516 return; 6517 } 6518 6519 static int 6520 qlnx_alloc_mem_load(qlnx_host_t *ha) 6521 { 6522 int rc = 0, rss_id; 6523 6524 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6525 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6526 6527 rc = qlnx_alloc_mem_fp(ha, fp); 6528 if (rc) 6529 break; 6530 } 6531 return (rc); 6532 } 6533 6534 static int 6535 qlnx_start_vport(struct ecore_dev *cdev, 6536 u8 vport_id, 6537 u16 mtu, 6538 u8 drop_ttl0_flg, 6539 u8 inner_vlan_removal_en_flg, 6540 u8 tx_switching, 6541 u8 hw_lro_enable) 6542 { 6543 int rc, i; 6544 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6545 qlnx_host_t *ha; 6546 6547 ha = (qlnx_host_t *)cdev; 6548 6549 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6550 vport_start_params.tx_switching = 0; 6551 vport_start_params.handle_ptp_pkts = 0; 6552 vport_start_params.only_untagged = 0; 6553 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6554 6555 vport_start_params.tpa_mode = 6556 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6557 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6558 6559 vport_start_params.vport_id = vport_id; 6560 vport_start_params.mtu = mtu; 6561 6562 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6563 6564 for_each_hwfn(cdev, i) { 6565 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6566 6567 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6568 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6569 6570 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6571 6572 if (rc) { 6573 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6574 " with MTU %d\n" , vport_id, mtu); 6575 return -ENOMEM; 6576 } 6577 6578 ecore_hw_start_fastpath(p_hwfn); 6579 6580 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6581 vport_id, mtu); 6582 } 6583 return 0; 6584 } 6585 6586 static int 6587 qlnx_update_vport(struct ecore_dev *cdev, 6588 struct qlnx_update_vport_params *params) 6589 { 6590 struct ecore_sp_vport_update_params sp_params; 6591 int rc, i, j, fp_index; 6592 struct ecore_hwfn *p_hwfn; 6593 struct ecore_rss_params *rss; 6594 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6595 struct qlnx_fastpath *fp; 6596 6597 memset(&sp_params, 0, sizeof(sp_params)); 6598 /* Translate protocol params into sp params */ 6599 sp_params.vport_id = params->vport_id; 6600 6601 sp_params.update_vport_active_rx_flg = 6602 params->update_vport_active_rx_flg; 6603 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6604 6605 sp_params.update_vport_active_tx_flg = 6606 params->update_vport_active_tx_flg; 6607 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6608 6609 sp_params.update_inner_vlan_removal_flg = 6610 params->update_inner_vlan_removal_flg; 6611 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6612 6613 sp_params.sge_tpa_params = params->sge_tpa_params; 6614 
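/*
 * Note (illustration only): for CMT devices the loop below rebuilds the
 * RSS indirection table separately for each engine using
 * fp_index = ((num_hwfns * j) + i) % num_rss. On a hypothetical
 * 2-engine adapter with num_rss = 4, engine 0 (i = 0) gets entries
 * 0,2,0,2,... and engine 1 (i = 1) gets 1,3,1,3,..., so each engine only
 * references the fastpaths whose rss_id maps to it
 * (fp->rss_id % num_hwfns == i), matching the hwfn selection done in
 * qlnx_start_queues().
 */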
6615 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6616 * We need to re-fix the rss values per engine for CMT. 6617 */ 6618 if (params->rss_params->update_rss_config) 6619 sp_params.rss_params = params->rss_params; 6620 else 6621 sp_params.rss_params = NULL; 6622 6623 for_each_hwfn(cdev, i) { 6624 p_hwfn = &cdev->hwfns[i]; 6625 6626 if ((cdev->num_hwfns > 1) && 6627 params->rss_params->update_rss_config && 6628 params->rss_params->rss_enable) { 6629 rss = params->rss_params; 6630 6631 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6632 fp_index = ((cdev->num_hwfns * j) + i) % 6633 ha->num_rss; 6634 6635 fp = &ha->fp_array[fp_index]; 6636 rss->rss_ind_table[j] = fp->rxq->handle; 6637 } 6638 6639 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6640 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6641 rss->rss_ind_table[j], 6642 rss->rss_ind_table[j+1], 6643 rss->rss_ind_table[j+2], 6644 rss->rss_ind_table[j+3], 6645 rss->rss_ind_table[j+4], 6646 rss->rss_ind_table[j+5], 6647 rss->rss_ind_table[j+6], 6648 rss->rss_ind_table[j+7]); 6649 j += 8; 6650 } 6651 } 6652 6653 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6654 6655 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6656 6657 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6658 ECORE_SPQ_MODE_EBLOCK, NULL); 6659 if (rc) { 6660 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6661 return rc; 6662 } 6663 6664 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6665 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6666 params->vport_id, params->vport_active_tx_flg, 6667 params->vport_active_rx_flg, 6668 params->update_vport_active_tx_flg, 6669 params->update_vport_active_rx_flg); 6670 } 6671 6672 return 0; 6673 } 6674 6675 static void 6676 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6677 { 6678 struct eth_rx_bd *rx_bd_cons = 6679 ecore_chain_consume(&rxq->rx_bd_ring); 6680 struct eth_rx_bd *rx_bd_prod = 6681 ecore_chain_produce(&rxq->rx_bd_ring); 6682 struct sw_rx_data *sw_rx_data_cons = 6683 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6684 struct sw_rx_data *sw_rx_data_prod = 6685 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6686 6687 sw_rx_data_prod->data = sw_rx_data_cons->data; 6688 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6689 6690 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6691 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6692 6693 return; 6694 } 6695 6696 static void 6697 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6698 { 6699 6700 uint16_t bd_prod; 6701 uint16_t cqe_prod; 6702 union { 6703 struct eth_rx_prod_data rx_prod_data; 6704 uint32_t data32; 6705 } rx_prods; 6706 6707 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6708 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6709 6710 /* Update producers */ 6711 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6712 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6713 6714 /* Make sure that the BD and SGE data is updated before updating the 6715 * producers since FW might read the BD/SGE right after the producer 6716 * is updated. 6717 */ 6718 wmb(); 6719 6720 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6721 sizeof(rx_prods), &rx_prods.data32); 6722 6723 /* mmiowb is needed to synchronize doorbell writes from more than one 6724 * processor. It guarantees that the write arrives to the device before 6725 * the napi lock is released and another qlnx_poll is called (possibly 6726 * on another CPU). 
Without this barrier, the next doorbell can bypass 6727 * this doorbell. This is applicable to IA64/Altix systems. 6728 */ 6729 wmb(); 6730 6731 return; 6732 } 6733 6734 static uint32_t qlnx_hash_key[] = { 6735 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6736 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6737 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6738 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6739 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6740 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6741 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6742 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6743 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6744 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6745 6746 static int 6747 qlnx_start_queues(qlnx_host_t *ha) 6748 { 6749 int rc, tc, i, vport_id = 0, 6750 drop_ttl0_flg = 1, vlan_removal_en = 1, 6751 tx_switching = 0, hw_lro_enable = 0; 6752 struct ecore_dev *cdev = &ha->cdev; 6753 struct ecore_rss_params *rss_params = &ha->rss_params; 6754 struct qlnx_update_vport_params vport_update_params; 6755 struct ifnet *ifp; 6756 struct ecore_hwfn *p_hwfn; 6757 struct ecore_sge_tpa_params tpa_params; 6758 struct ecore_queue_start_common_params qparams; 6759 struct qlnx_fastpath *fp; 6760 6761 ifp = ha->ifp; 6762 6763 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6764 6765 if (!ha->num_rss) { 6766 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6767 " are no Rx queues\n"); 6768 return -EINVAL; 6769 } 6770 6771 #ifndef QLNX_SOFT_LRO 6772 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6773 #endif /* #ifndef QLNX_SOFT_LRO */ 6774 6775 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6776 vlan_removal_en, tx_switching, hw_lro_enable); 6777 6778 if (rc) { 6779 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6780 return rc; 6781 } 6782 6783 QL_DPRINT2(ha, "Start vport ramrod passed, " 6784 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6785 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 6786 6787 for_each_rss(i) { 6788 struct ecore_rxq_start_ret_params rx_ret_params; 6789 struct ecore_txq_start_ret_params tx_ret_params; 6790 6791 fp = &ha->fp_array[i]; 6792 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6793 6794 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6795 bzero(&rx_ret_params, 6796 sizeof (struct ecore_rxq_start_ret_params)); 6797 6798 qparams.queue_id = i ; 6799 qparams.vport_id = vport_id; 6800 qparams.stats_id = vport_id; 6801 qparams.p_sb = fp->sb_info; 6802 qparams.sb_idx = RX_PI; 6803 6804 6805 rc = ecore_eth_rx_queue_start(p_hwfn, 6806 p_hwfn->hw_info.opaque_fid, 6807 &qparams, 6808 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6809 /* bd_chain_phys_addr */ 6810 fp->rxq->rx_bd_ring.p_phys_addr, 6811 /* cqe_pbl_addr */ 6812 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6813 /* cqe_pbl_size */ 6814 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6815 &rx_ret_params); 6816 6817 if (rc) { 6818 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6819 return rc; 6820 } 6821 6822 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6823 fp->rxq->handle = rx_ret_params.p_handle; 6824 fp->rxq->hw_cons_ptr = 6825 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6826 6827 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6828 6829 for (tc = 0; tc < ha->num_tc; tc++) { 6830 struct qlnx_tx_queue *txq = fp->txq[tc]; 6831 6832 bzero(&qparams, 6833 sizeof(struct ecore_queue_start_common_params)); 6834 bzero(&tx_ret_params, 6835 sizeof (struct ecore_txq_start_ret_params)); 6836 6837 qparams.queue_id = 
txq->index / cdev->num_hwfns ; 6838 qparams.vport_id = vport_id; 6839 qparams.stats_id = vport_id; 6840 qparams.p_sb = fp->sb_info; 6841 qparams.sb_idx = TX_PI(tc); 6842 6843 rc = ecore_eth_tx_queue_start(p_hwfn, 6844 p_hwfn->hw_info.opaque_fid, 6845 &qparams, tc, 6846 /* bd_chain_phys_addr */ 6847 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6848 ecore_chain_get_page_cnt(&txq->tx_pbl), 6849 &tx_ret_params); 6850 6851 if (rc) { 6852 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6853 txq->index, rc); 6854 return rc; 6855 } 6856 6857 txq->doorbell_addr = tx_ret_params.p_doorbell; 6858 txq->handle = tx_ret_params.p_handle; 6859 6860 txq->hw_cons_ptr = 6861 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6862 SET_FIELD(txq->tx_db.data.params, 6863 ETH_DB_DATA_DEST, DB_DEST_XCM); 6864 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6865 DB_AGG_CMD_SET); 6866 SET_FIELD(txq->tx_db.data.params, 6867 ETH_DB_DATA_AGG_VAL_SEL, 6868 DQ_XCM_ETH_TX_BD_PROD_CMD); 6869 6870 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6871 } 6872 } 6873 6874 /* Fill struct with RSS params */ 6875 if (ha->num_rss > 1) { 6876 rss_params->update_rss_config = 1; 6877 rss_params->rss_enable = 1; 6878 rss_params->update_rss_capabilities = 1; 6879 rss_params->update_rss_ind_table = 1; 6880 rss_params->update_rss_key = 1; 6881 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6882 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6883 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6884 6885 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6886 fp = &ha->fp_array[(i % ha->num_rss)]; 6887 rss_params->rss_ind_table[i] = fp->rxq->handle; 6888 } 6889 6890 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6891 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6892 6893 } else { 6894 memset(rss_params, 0, sizeof(*rss_params)); 6895 } 6896 6897 /* Prepare and send the vport enable */ 6898 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6899 vport_update_params.vport_id = vport_id; 6900 vport_update_params.update_vport_active_tx_flg = 1; 6901 vport_update_params.vport_active_tx_flg = 1; 6902 vport_update_params.update_vport_active_rx_flg = 1; 6903 vport_update_params.vport_active_rx_flg = 1; 6904 vport_update_params.rss_params = rss_params; 6905 vport_update_params.update_inner_vlan_removal_flg = 1; 6906 vport_update_params.inner_vlan_removal_flg = 1; 6907 6908 if (hw_lro_enable) { 6909 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6910 6911 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6912 6913 tpa_params.update_tpa_en_flg = 1; 6914 tpa_params.tpa_ipv4_en_flg = 1; 6915 tpa_params.tpa_ipv6_en_flg = 1; 6916 6917 tpa_params.update_tpa_param_flg = 1; 6918 tpa_params.tpa_pkt_split_flg = 0; 6919 tpa_params.tpa_hdr_data_split_flg = 0; 6920 tpa_params.tpa_gro_consistent_flg = 0; 6921 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6922 tpa_params.tpa_max_size = (uint16_t)(-1); 6923 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6924 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6925 6926 vport_update_params.sge_tpa_params = &tpa_params; 6927 } 6928 6929 rc = qlnx_update_vport(cdev, &vport_update_params); 6930 if (rc) { 6931 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6932 return rc; 6933 } 6934 6935 return 0; 6936 } 6937 6938 static int 6939 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6940 struct qlnx_tx_queue *txq) 6941 { 6942 uint16_t hw_bd_cons; 6943 uint16_t ecore_cons_idx; 6944 6945 QL_DPRINT2(ha, "enter\n"); 6946 6947 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6948 6949 
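/*
 * Drain loop: keep reaping Tx completions until the firmware consumer
 * index (read from the status block via txq->hw_cons_ptr) catches up
 * with the driver's chain consumer index. Each pass runs qlnx_tx_int()
 * under the fastpath Tx lock and then waits 2ms before re-sampling the
 * hardware consumer.
 */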
while (hw_bd_cons != 6950 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6951 mtx_lock(&fp->tx_mtx); 6952 6953 (void)qlnx_tx_int(ha, fp, txq); 6954 6955 mtx_unlock(&fp->tx_mtx); 6956 6957 qlnx_mdelay(__func__, 2); 6958 6959 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6960 } 6961 6962 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6963 6964 return 0; 6965 } 6966 6967 static int 6968 qlnx_stop_queues(qlnx_host_t *ha) 6969 { 6970 struct qlnx_update_vport_params vport_update_params; 6971 struct ecore_dev *cdev; 6972 struct qlnx_fastpath *fp; 6973 int rc, tc, i; 6974 6975 cdev = &ha->cdev; 6976 6977 /* Disable the vport */ 6978 6979 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6980 6981 vport_update_params.vport_id = 0; 6982 vport_update_params.update_vport_active_tx_flg = 1; 6983 vport_update_params.vport_active_tx_flg = 0; 6984 vport_update_params.update_vport_active_rx_flg = 1; 6985 vport_update_params.vport_active_rx_flg = 0; 6986 vport_update_params.rss_params = &ha->rss_params; 6987 vport_update_params.rss_params->update_rss_config = 0; 6988 vport_update_params.rss_params->rss_enable = 0; 6989 vport_update_params.update_inner_vlan_removal_flg = 0; 6990 vport_update_params.inner_vlan_removal_flg = 0; 6991 6992 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6993 6994 rc = qlnx_update_vport(cdev, &vport_update_params); 6995 if (rc) { 6996 QL_DPRINT1(ha, "Failed to update vport\n"); 6997 return rc; 6998 } 6999 7000 /* Flush Tx queues. If needed, request drain from MCP */ 7001 for_each_rss(i) { 7002 fp = &ha->fp_array[i]; 7003 7004 for (tc = 0; tc < ha->num_tc; tc++) { 7005 struct qlnx_tx_queue *txq = fp->txq[tc]; 7006 7007 rc = qlnx_drain_txq(ha, fp, txq); 7008 if (rc) 7009 return rc; 7010 } 7011 } 7012 7013 /* Stop all Queues in reverse order*/ 7014 for (i = ha->num_rss - 1; i >= 0; i--) { 7015 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 7016 7017 fp = &ha->fp_array[i]; 7018 7019 /* Stop the Tx Queue(s)*/ 7020 for (tc = 0; tc < ha->num_tc; tc++) { 7021 int tx_queue_id; 7022 7023 tx_queue_id = tc * ha->num_rss + i; 7024 rc = ecore_eth_tx_queue_stop(p_hwfn, 7025 fp->txq[tc]->handle); 7026 7027 if (rc) { 7028 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 7029 tx_queue_id); 7030 return rc; 7031 } 7032 } 7033 7034 /* Stop the Rx Queue*/ 7035 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 7036 false); 7037 if (rc) { 7038 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 7039 return rc; 7040 } 7041 } 7042 7043 /* Stop the vport */ 7044 for_each_hwfn(cdev, i) { 7045 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 7046 7047 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 7048 7049 if (rc) { 7050 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 7051 return rc; 7052 } 7053 } 7054 7055 return rc; 7056 } 7057 7058 static int 7059 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 7060 enum ecore_filter_opcode opcode, 7061 unsigned char mac[ETH_ALEN]) 7062 { 7063 struct ecore_filter_ucast ucast; 7064 struct ecore_dev *cdev; 7065 int rc; 7066 7067 cdev = &ha->cdev; 7068 7069 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7070 7071 ucast.opcode = opcode; 7072 ucast.type = ECORE_FILTER_MAC; 7073 ucast.is_rx_filter = 1; 7074 ucast.vport_to_add_to = 0; 7075 memcpy(&ucast.mac[0], mac, ETH_ALEN); 7076 7077 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 7078 7079 return (rc); 7080 } 7081 7082 static int 7083 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 7084 { 7085 struct ecore_filter_ucast ucast; 7086 
struct ecore_dev *cdev; 7087 int rc; 7088 7089 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 7090 7091 ucast.opcode = ECORE_FILTER_REPLACE; 7092 ucast.type = ECORE_FILTER_MAC; 7093 ucast.is_rx_filter = 1; 7094 7095 cdev = &ha->cdev; 7096 7097 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 7098 7099 return (rc); 7100 } 7101 7102 static int 7103 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 7104 { 7105 struct ecore_filter_mcast *mcast; 7106 struct ecore_dev *cdev; 7107 int rc, i; 7108 7109 cdev = &ha->cdev; 7110 7111 mcast = &ha->ecore_mcast; 7112 bzero(mcast, sizeof(struct ecore_filter_mcast)); 7113 7114 mcast->opcode = ECORE_FILTER_REMOVE; 7115 7116 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 7117 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7118 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7119 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7120 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7121 mcast->num_mc_addrs++; 7122 } 7123 } 7124 mcast = &ha->ecore_mcast; 7125 7126 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7127 7128 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7129 ha->nmcast = 0; 7130 7131 return (rc); 7132 } 7133 7134 static int 7135 qlnx_clean_filters(qlnx_host_t *ha) 7136 { 7137 int rc = 0; 7138 7139 /* Remove all unicast macs */ 7140 rc = qlnx_remove_all_ucast_mac(ha); 7141 if (rc) 7142 return rc; 7143 7144 /* Remove all multicast macs */ 7145 rc = qlnx_remove_all_mcast_mac(ha); 7146 if (rc) 7147 return rc; 7148 7149 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7150 7151 return (rc); 7152 } 7153 7154 static int 7155 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7156 { 7157 struct ecore_filter_accept_flags accept; 7158 int rc = 0; 7159 struct ecore_dev *cdev; 7160 7161 cdev = &ha->cdev; 7162 7163 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7164 7165 accept.update_rx_mode_config = 1; 7166 accept.rx_accept_filter = filter; 7167 7168 accept.update_tx_mode_config = 1; 7169 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7170 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7171 7172 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7173 ECORE_SPQ_MODE_CB, NULL); 7174 7175 return (rc); 7176 } 7177 7178 static int 7179 qlnx_set_rx_mode(qlnx_host_t *ha) 7180 { 7181 int rc = 0; 7182 uint8_t filter; 7183 7184 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7185 if (rc) 7186 return rc; 7187 7188 rc = qlnx_remove_all_mcast_mac(ha); 7189 if (rc) 7190 return rc; 7191 7192 filter = ECORE_ACCEPT_UCAST_MATCHED | 7193 ECORE_ACCEPT_MCAST_MATCHED | 7194 ECORE_ACCEPT_BCAST; 7195 7196 if (qlnx_vf_device(ha) == 0) { 7197 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7198 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7199 } 7200 ha->filter = filter; 7201 7202 rc = qlnx_set_rx_accept_filter(ha, filter); 7203 7204 return (rc); 7205 } 7206 7207 static int 7208 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7209 { 7210 int i, rc = 0; 7211 struct ecore_dev *cdev; 7212 struct ecore_hwfn *hwfn; 7213 struct ecore_ptt *ptt; 7214 7215 if (qlnx_vf_device(ha) == 0) 7216 return (0); 7217 7218 cdev = &ha->cdev; 7219 7220 for_each_hwfn(cdev, i) { 7221 hwfn = &cdev->hwfns[i]; 7222 7223 ptt = ecore_ptt_acquire(hwfn); 7224 if (!ptt) 7225 return -EBUSY; 7226 7227 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7228 7229 ecore_ptt_release(hwfn, ptt); 7230 7231 if (rc) 7232 return rc; 7233 } 7234 return (rc); 7235 } 7236 7237 #if 
__FreeBSD_version >= 1100000 7238 static uint64_t 7239 qlnx_get_counter(if_t ifp, ift_counter cnt) 7240 { 7241 qlnx_host_t *ha; 7242 uint64_t count; 7243 7244 ha = (qlnx_host_t *)if_getsoftc(ifp); 7245 7246 switch (cnt) { 7247 case IFCOUNTER_IPACKETS: 7248 count = ha->hw_stats.common.rx_ucast_pkts + 7249 ha->hw_stats.common.rx_mcast_pkts + 7250 ha->hw_stats.common.rx_bcast_pkts; 7251 break; 7252 7253 case IFCOUNTER_IERRORS: 7254 count = ha->hw_stats.common.rx_crc_errors + 7255 ha->hw_stats.common.rx_align_errors + 7256 ha->hw_stats.common.rx_oversize_packets + 7257 ha->hw_stats.common.rx_undersize_packets; 7258 break; 7259 7260 case IFCOUNTER_OPACKETS: 7261 count = ha->hw_stats.common.tx_ucast_pkts + 7262 ha->hw_stats.common.tx_mcast_pkts + 7263 ha->hw_stats.common.tx_bcast_pkts; 7264 break; 7265 7266 case IFCOUNTER_OERRORS: 7267 count = ha->hw_stats.common.tx_err_drop_pkts; 7268 break; 7269 7270 case IFCOUNTER_COLLISIONS: 7271 return (0); 7272 7273 case IFCOUNTER_IBYTES: 7274 count = ha->hw_stats.common.rx_ucast_bytes + 7275 ha->hw_stats.common.rx_mcast_bytes + 7276 ha->hw_stats.common.rx_bcast_bytes; 7277 break; 7278 7279 case IFCOUNTER_OBYTES: 7280 count = ha->hw_stats.common.tx_ucast_bytes + 7281 ha->hw_stats.common.tx_mcast_bytes + 7282 ha->hw_stats.common.tx_bcast_bytes; 7283 break; 7284 7285 case IFCOUNTER_IMCASTS: 7286 count = ha->hw_stats.common.rx_mcast_pkts; 7287 break; 7288 7289 case IFCOUNTER_OMCASTS: 7290 count = ha->hw_stats.common.tx_mcast_pkts; 7291 break; 7292 7293 case IFCOUNTER_IQDROPS: 7294 case IFCOUNTER_OQDROPS: 7295 case IFCOUNTER_NOPROTO: 7296 7297 default: 7298 return (if_get_counter_default(ifp, cnt)); 7299 } 7300 return (count); 7301 } 7302 #endif 7303 7304 static void 7305 qlnx_timer(void *arg) 7306 { 7307 qlnx_host_t *ha; 7308 7309 ha = (qlnx_host_t *)arg; 7310 7311 if (ha->error_recovery) { 7312 ha->error_recovery = 0; 7313 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7314 return; 7315 } 7316 7317 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7318 7319 if (ha->storm_stats_gather) 7320 qlnx_sample_storm_stats(ha); 7321 7322 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7323 7324 return; 7325 } 7326 7327 static int 7328 qlnx_load(qlnx_host_t *ha) 7329 { 7330 int i; 7331 int rc = 0; 7332 struct ecore_dev *cdev; 7333 device_t dev; 7334 7335 cdev = &ha->cdev; 7336 dev = ha->pci_dev; 7337 7338 QL_DPRINT2(ha, "enter\n"); 7339 7340 rc = qlnx_alloc_mem_arrays(ha); 7341 if (rc) 7342 goto qlnx_load_exit0; 7343 7344 qlnx_init_fp(ha); 7345 7346 rc = qlnx_alloc_mem_load(ha); 7347 if (rc) 7348 goto qlnx_load_exit1; 7349 7350 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7351 ha->num_rss, ha->num_tc); 7352 7353 for (i = 0; i < ha->num_rss; i++) { 7354 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7355 (INTR_TYPE_NET | INTR_MPSAFE), 7356 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7357 &ha->irq_vec[i].handle))) { 7358 QL_DPRINT1(ha, "could not setup interrupt\n"); 7359 goto qlnx_load_exit2; 7360 } 7361 7362 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 7363 irq %p handle %p\n", i, 7364 ha->irq_vec[i].irq_rid, 7365 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7366 7367 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7368 } 7369 7370 rc = qlnx_start_queues(ha); 7371 if (rc) 7372 goto qlnx_load_exit2; 7373 7374 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); 7375 7376 /* Add primary mac and set Rx filters */ 7377 rc = qlnx_set_rx_mode(ha); 7378 if (rc) 7379 goto qlnx_load_exit2; 7380 7381 /* Ask for link-up using current configuration */ 7382
qlnx_set_link(ha, true); 7383 7384 if (qlnx_vf_device(ha) == 0) 7385 qlnx_link_update(&ha->cdev.hwfns[0]); 7386 7387 ha->state = QLNX_STATE_OPEN; 7388 7389 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7390 7391 if (ha->flags.callout_init) 7392 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7393 7394 goto qlnx_load_exit0; 7395 7396 qlnx_load_exit2: 7397 qlnx_free_mem_load(ha); 7398 7399 qlnx_load_exit1: 7400 ha->num_rss = 0; 7401 7402 qlnx_load_exit0: 7403 QL_DPRINT2(ha, "exit [%d]\n", rc); 7404 return rc; 7405 } 7406 7407 static void 7408 qlnx_drain_soft_lro(qlnx_host_t *ha) 7409 { 7410 #ifdef QLNX_SOFT_LRO 7411 7412 struct ifnet *ifp; 7413 int i; 7414 7415 ifp = ha->ifp; 7416 7417 if (ifp->if_capenable & IFCAP_LRO) { 7418 for (i = 0; i < ha->num_rss; i++) { 7419 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7420 struct lro_ctrl *lro; 7421 7422 lro = &fp->rxq->lro; 7423 7424 #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) 7425 7426 tcp_lro_flush_all(lro); 7427 7428 #else 7429 struct lro_entry *queued; 7430 7431 while ((!SLIST_EMPTY(&lro->lro_active))){ 7432 queued = SLIST_FIRST(&lro->lro_active); 7433 SLIST_REMOVE_HEAD(&lro->lro_active, next); 7434 tcp_lro_flush(lro, queued); 7435 } 7436 7437 #endif /* #if (__FreeBSD_version >= 1100101) || (defined QLNX_QSORT_LRO) */ 7438 } 7439 } 7440 7441 #endif /* #ifdef QLNX_SOFT_LRO */ 7442 7443 return; 7444 } 7445 7446 static void 7447 qlnx_unload(qlnx_host_t *ha) 7448 { 7449 struct ecore_dev *cdev; 7450 device_t dev; 7451 int i; 7452 7453 cdev = &ha->cdev; 7454 dev = ha->pci_dev; 7455 7456 QL_DPRINT2(ha, "enter\n"); 7457 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7458 7459 if (ha->state == QLNX_STATE_OPEN) { 7460 qlnx_set_link(ha, false); 7461 qlnx_clean_filters(ha); 7462 qlnx_stop_queues(ha); 7463 ecore_hw_stop_fastpath(cdev); 7464 7465 for (i = 0; i < ha->num_rss; i++) { 7466 if (ha->irq_vec[i].handle) { 7467 (void)bus_teardown_intr(dev, 7468 ha->irq_vec[i].irq, 7469 ha->irq_vec[i].handle); 7470 ha->irq_vec[i].handle = NULL; 7471 } 7472 } 7473 7474 qlnx_drain_fp_taskqueues(ha); 7475 qlnx_drain_soft_lro(ha); 7476 qlnx_free_mem_load(ha); 7477 } 7478 7479 if (ha->flags.callout_init) 7480 callout_drain(&ha->qlnx_callout); 7481 7482 qlnx_mdelay(__func__, 1000); 7483 7484 ha->state = QLNX_STATE_CLOSED; 7485 7486 QL_DPRINT2(ha, "exit\n"); 7487 return; 7488 } 7489 7490 static int 7491 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7492 { 7493 int rval = -1; 7494 struct ecore_hwfn *p_hwfn; 7495 struct ecore_ptt *p_ptt; 7496 7497 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7498 7499 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7500 p_ptt = ecore_ptt_acquire(p_hwfn); 7501 7502 if (!p_ptt) { 7503 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7504 return (rval); 7505 } 7506 7507 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7508 7509 if (rval == DBG_STATUS_OK) 7510 rval = 0; 7511 else { 7512 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7513 "[0x%x]\n", rval); 7514 } 7515 7516 ecore_ptt_release(p_hwfn, p_ptt); 7517 7518 return (rval); 7519 } 7520 7521 static int 7522 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7523 { 7524 int rval = -1; 7525 struct ecore_hwfn *p_hwfn; 7526 struct ecore_ptt *p_ptt; 7527 7528 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7529 7530 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7531 p_ptt = ecore_ptt_acquire(p_hwfn); 7532 7533 if (!p_ptt) { 7534 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7535 return (rval); 
7536 } 7537 7538 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7539 7540 if (rval == DBG_STATUS_OK) 7541 rval = 0; 7542 else { 7543 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7544 " [0x%x]\n", rval); 7545 } 7546 7547 ecore_ptt_release(p_hwfn, p_ptt); 7548 7549 return (rval); 7550 } 7551 7552 static void 7553 qlnx_sample_storm_stats(qlnx_host_t *ha) 7554 { 7555 int i, index; 7556 struct ecore_dev *cdev; 7557 qlnx_storm_stats_t *s_stats; 7558 uint32_t reg; 7559 struct ecore_ptt *p_ptt; 7560 struct ecore_hwfn *hwfn; 7561 7562 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7563 ha->storm_stats_gather = 0; 7564 return; 7565 } 7566 7567 cdev = &ha->cdev; 7568 7569 for_each_hwfn(cdev, i) { 7570 hwfn = &cdev->hwfns[i]; 7571 7572 p_ptt = ecore_ptt_acquire(hwfn); 7573 if (!p_ptt) 7574 return; 7575 7576 index = ha->storm_stats_index + 7577 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7578 7579 s_stats = &ha->storm_stats[index]; 7580 7581 /* XSTORM */ 7582 reg = XSEM_REG_FAST_MEMORY + 7583 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7584 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7585 7586 reg = XSEM_REG_FAST_MEMORY + 7587 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7588 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7589 7590 reg = XSEM_REG_FAST_MEMORY + 7591 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7592 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7593 7594 reg = XSEM_REG_FAST_MEMORY + 7595 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7596 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7597 7598 /* YSTORM */ 7599 reg = YSEM_REG_FAST_MEMORY + 7600 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7601 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7602 7603 reg = YSEM_REG_FAST_MEMORY + 7604 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7605 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7606 7607 reg = YSEM_REG_FAST_MEMORY + 7608 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7609 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7610 7611 reg = YSEM_REG_FAST_MEMORY + 7612 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7613 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7614 7615 /* PSTORM */ 7616 reg = PSEM_REG_FAST_MEMORY + 7617 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7618 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7619 7620 reg = PSEM_REG_FAST_MEMORY + 7621 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7622 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7623 7624 reg = PSEM_REG_FAST_MEMORY + 7625 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7626 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7627 7628 reg = PSEM_REG_FAST_MEMORY + 7629 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7630 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7631 7632 /* TSTORM */ 7633 reg = TSEM_REG_FAST_MEMORY + 7634 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7635 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7636 7637 reg = TSEM_REG_FAST_MEMORY + 7638 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7639 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7640 7641 reg = TSEM_REG_FAST_MEMORY + 7642 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7643 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7644 7645 reg = TSEM_REG_FAST_MEMORY + 7646 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7647 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7648 7649 /* MSTORM */ 7650 reg = MSEM_REG_FAST_MEMORY + 7651 
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7652 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7653 7654 reg = MSEM_REG_FAST_MEMORY + 7655 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7656 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7657 7658 reg = MSEM_REG_FAST_MEMORY + 7659 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7660 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7661 7662 reg = MSEM_REG_FAST_MEMORY + 7663 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7664 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7665 7666 /* USTORM */ 7667 reg = USEM_REG_FAST_MEMORY + 7668 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7669 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7670 7671 reg = USEM_REG_FAST_MEMORY + 7672 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7673 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7674 7675 reg = USEM_REG_FAST_MEMORY + 7676 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7677 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7678 7679 reg = USEM_REG_FAST_MEMORY + 7680 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7681 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7682 7683 ecore_ptt_release(hwfn, p_ptt); 7684 } 7685 7686 ha->storm_stats_index++; 7687 7688 return; 7689 } 7690 7691 /* 7692 * Name: qlnx_dump_buf8 7693 * Function: dumps a buffer as bytes 7694 */ 7695 static void 7696 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7697 { 7698 device_t dev; 7699 uint32_t i = 0; 7700 uint8_t *buf; 7701 7702 dev = ha->pci_dev; 7703 buf = dbuf; 7704 7705 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7706 7707 while (len >= 16) { 7708 device_printf(dev,"0x%08x:" 7709 " %02x %02x %02x %02x %02x %02x %02x %02x" 7710 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7711 buf[0], buf[1], buf[2], buf[3], 7712 buf[4], buf[5], buf[6], buf[7], 7713 buf[8], buf[9], buf[10], buf[11], 7714 buf[12], buf[13], buf[14], buf[15]); 7715 i += 16; 7716 len -= 16; 7717 buf += 16; 7718 } 7719 switch (len) { 7720 case 1: 7721 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7722 break; 7723 case 2: 7724 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7725 break; 7726 case 3: 7727 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7728 i, buf[0], buf[1], buf[2]); 7729 break; 7730 case 4: 7731 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7732 buf[0], buf[1], buf[2], buf[3]); 7733 break; 7734 case 5: 7735 device_printf(dev,"0x%08x:" 7736 " %02x %02x %02x %02x %02x\n", i, 7737 buf[0], buf[1], buf[2], buf[3], buf[4]); 7738 break; 7739 case 6: 7740 device_printf(dev,"0x%08x:" 7741 " %02x %02x %02x %02x %02x %02x\n", i, 7742 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7743 break; 7744 case 7: 7745 device_printf(dev,"0x%08x:" 7746 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7747 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7748 break; 7749 case 8: 7750 device_printf(dev,"0x%08x:" 7751 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7752 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7753 buf[7]); 7754 break; 7755 case 9: 7756 device_printf(dev,"0x%08x:" 7757 " %02x %02x %02x %02x %02x %02x %02x %02x" 7758 " %02x\n", i, 7759 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7760 buf[7], buf[8]); 7761 break; 7762 case 10: 7763 device_printf(dev,"0x%08x:" 7764 " %02x %02x %02x %02x %02x %02x %02x %02x" 7765 " %02x %02x\n", i, 7766 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7767 buf[7], buf[8], buf[9]); 7768 break; 7769 case 11: 
7770 device_printf(dev,"0x%08x:" 7771 " %02x %02x %02x %02x %02x %02x %02x %02x" 7772 " %02x %02x %02x\n", i, 7773 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7774 buf[7], buf[8], buf[9], buf[10]); 7775 break; 7776 case 12: 7777 device_printf(dev,"0x%08x:" 7778 " %02x %02x %02x %02x %02x %02x %02x %02x" 7779 " %02x %02x %02x %02x\n", i, 7780 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7781 buf[7], buf[8], buf[9], buf[10], buf[11]); 7782 break; 7783 case 13: 7784 device_printf(dev,"0x%08x:" 7785 " %02x %02x %02x %02x %02x %02x %02x %02x" 7786 " %02x %02x %02x %02x %02x\n", i, 7787 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7788 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7789 break; 7790 case 14: 7791 device_printf(dev,"0x%08x:" 7792 " %02x %02x %02x %02x %02x %02x %02x %02x" 7793 " %02x %02x %02x %02x %02x %02x\n", i, 7794 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7795 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7796 buf[13]); 7797 break; 7798 case 15: 7799 device_printf(dev,"0x%08x:" 7800 " %02x %02x %02x %02x %02x %02x %02x %02x" 7801 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7802 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7803 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7804 buf[13], buf[14]); 7805 break; 7806 default: 7807 break; 7808 } 7809 7810 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7811 7812 return; 7813 } 7814 7815 #ifdef CONFIG_ECORE_SRIOV 7816 7817 static void 7818 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7819 { 7820 struct ecore_public_vf_info *vf_info; 7821 7822 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7823 7824 if (!vf_info) 7825 return; 7826 7827 /* Clear the VF mac */ 7828 memset(vf_info->forced_mac, 0, ETH_ALEN); 7829 7830 vf_info->forced_vlan = 0; 7831 7832 return; 7833 } 7834 7835 void 7836 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7837 { 7838 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7839 return; 7840 } 7841 7842 static int 7843 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7844 struct ecore_filter_ucast *params) 7845 { 7846 struct ecore_public_vf_info *vf; 7847 7848 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 7849 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 7850 "VF[%d] vport not initialized\n", vfid); 7851 return ECORE_INVAL; 7852 } 7853 7854 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 7855 if (!vf) 7856 return -EINVAL; 7857 7858 /* No real decision to make; Store the configured MAC */ 7859 if (params->type == ECORE_FILTER_MAC || 7860 params->type == ECORE_FILTER_MAC_VLAN) 7861 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 7862 7863 return 0; 7864 } 7865 7866 int 7867 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 7868 { 7869 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 7870 } 7871 7872 static int 7873 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 7874 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 7875 { 7876 uint8_t mask; 7877 struct ecore_filter_accept_flags *flags; 7878 7879 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 7880 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 7881 "VF[%d] vport not initialized\n", vfid); 7882 return ECORE_INVAL; 7883 } 7884 7885 /* Untrusted VFs can't even be trusted to know that fact. 7886 * Simply indicate everything is configured fine, and trace 7887 * configuration 'behind their back'. 
7888 */ 7889 mask = ECORE_ACCEPT_UCAST_UNMATCHED | ECORE_ACCEPT_MCAST_UNMATCHED; 7890 flags = &params->accept_flags; 7891 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) 7892 return 0; 7893 7894 return 0; 7895 7896 } 7897 int 7898 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) 7899 { 7900 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); 7901 } 7902 7903 static int 7904 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) 7905 { 7906 int i; 7907 struct ecore_dev *cdev; 7908 7909 cdev = p_hwfn->p_dev; 7910 7911 for (i = 0; i < cdev->num_hwfns; i++) { 7912 if (&cdev->hwfns[i] == p_hwfn) 7913 break; 7914 } 7915 7916 if (i >= cdev->num_hwfns) 7917 return (-1); 7918 7919 return (i); 7920 } 7921 7922 static int 7923 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) 7924 { 7925 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7926 int i; 7927 7928 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", 7929 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); 7930 7931 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7932 return (-1); 7933 7934 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7935 atomic_testandset_32(&ha->sriov_task[i].flags, 7936 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); 7937 7938 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7939 &ha->sriov_task[i].pf_task); 7940 } 7941 7942 return (ECORE_SUCCESS); 7943 } 7944 7945 int 7946 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) 7947 { 7948 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); 7949 } 7950 7951 static void 7952 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) 7953 { 7954 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7955 int i; 7956 7957 if (!ha->sriov_initialized) 7958 return; 7959 7960 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7961 ha, p_hwfn->p_dev, p_hwfn); 7962 7963 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7964 return; 7965 7966 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7967 atomic_testandset_32(&ha->sriov_task[i].flags, 7968 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); 7969 7970 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7971 &ha->sriov_task[i].pf_task); 7972 } 7973 7974 return; 7975 } 7976 7977 void 7978 qlnx_vf_flr_update(void *p_hwfn) 7979 { 7980 __qlnx_vf_flr_update(p_hwfn); 7981 7982 return; 7983 } 7984 7985 #ifndef QLNX_VF 7986 7987 static void 7988 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) 7989 { 7990 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7991 int i; 7992 7993 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7994 ha, p_hwfn->p_dev, p_hwfn); 7995 7996 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7997 return; 7998 7999 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", 8000 ha, p_hwfn->p_dev, p_hwfn, i); 8001 8002 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8003 atomic_testandset_32(&ha->sriov_task[i].flags, 8004 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); 8005 8006 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 8007 &ha->sriov_task[i].pf_task); 8008 } 8009 } 8010 8011 static void 8012 qlnx_initialize_sriov(qlnx_host_t *ha) 8013 { 8014 device_t dev; 8015 nvlist_t *pf_schema, *vf_schema; 8016 int iov_error; 8017 8018 dev = ha->pci_dev; 8019 8020 pf_schema = pci_iov_schema_alloc_node(); 8021 vf_schema = pci_iov_schema_alloc_node(); 8022 8023 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 8024 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 8025 IOV_SCHEMA_HASDEFAULT, FALSE); 8026 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 8027 IOV_SCHEMA_HASDEFAULT, FALSE); 8028
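/*
 * The VF parameters registered in this schema ("mac-addr",
 * "allow-set-mac", "allow-promisc" and "num-queues" just below) are
 * supplied via iovctl(8). A minimal, purely illustrative iovctl.conf(5)
 * fragment might look like the following; the PF device name is
 * hypothetical and must match the actual device on the system:
 *
 *	PF {
 *		device : "qlnxe0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "00:0e:1e:00:00:01";
 *		num-queues : 2;
 *	}
 */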
pci_iov_schema_add_uint16(vf_schema, "num-queues", 8029 IOV_SCHEMA_HASDEFAULT, 1); 8030 8031 iov_error = pci_iov_attach(dev, pf_schema, vf_schema); 8032 8033 if (iov_error != 0) { 8034 ha->sriov_initialized = 0; 8035 } else { 8036 device_printf(dev, "SRIOV initialized\n"); 8037 ha->sriov_initialized = 1; 8038 } 8039 8040 return; 8041 } 8042 8043 static void 8044 qlnx_sriov_disable(qlnx_host_t *ha) 8045 { 8046 struct ecore_dev *cdev; 8047 int i, j; 8048 8049 cdev = &ha->cdev; 8050 8051 ecore_iov_set_vfs_to_disable(cdev, true); 8052 8053 for_each_hwfn(cdev, i) { 8054 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 8055 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8056 8057 if (!ptt) { 8058 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8059 return; 8060 } 8061 /* Clean WFQ db and configure equal weight for all vports */ 8062 ecore_clean_wfq_db(hwfn, ptt); 8063 8064 ecore_for_each_vf(hwfn, j) { 8065 int k = 0; 8066 8067 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 8068 continue; 8069 8070 if (ecore_iov_is_vf_started(hwfn, j)) { 8071 /* Wait until VF is disabled before releasing */ 8072 8073 for (k = 0; k < 100; k++) { 8074 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 8075 qlnx_mdelay(__func__, 10); 8076 } else 8077 break; 8078 } 8079 } 8080 8081 if (k < 100) 8082 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 8083 ptt, j); 8084 else { 8085 QL_DPRINT1(ha, 8086 "Timeout waiting for VF's FLR to end\n"); 8087 } 8088 } 8089 ecore_ptt_release(hwfn, ptt); 8090 } 8091 8092 ecore_iov_set_vfs_to_disable(cdev, false); 8093 8094 return; 8095 } 8096 8097 static void 8098 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 8099 struct ecore_iov_vf_init_params *params) 8100 { 8101 u16 base, i; 8102 8103 /* Since we have an equal resource distribution per-VF, and we assume 8104 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 8105 * sequentially from there. 
8106 */ 8107 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; 8108 8109 params->rel_vf_id = vfid; 8110 8111 for (i = 0; i < params->num_queues; i++) { 8112 params->req_rx_queue[i] = base + i; 8113 params->req_tx_queue[i] = base + i; 8114 } 8115 8116 /* PF uses indices 0 for itself; Set vport/RSS afterwards */ 8117 params->vport_id = vfid + 1; 8118 params->rss_eng_id = vfid + 1; 8119 8120 return; 8121 } 8122 8123 static int 8124 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) 8125 { 8126 qlnx_host_t *ha; 8127 struct ecore_dev *cdev; 8128 struct ecore_iov_vf_init_params params; 8129 int ret, j, i; 8130 uint32_t max_vfs; 8131 8132 if ((ha = device_get_softc(dev)) == NULL) { 8133 device_printf(dev, "%s: cannot get softc\n", __func__); 8134 return (-1); 8135 } 8136 8137 if (qlnx_create_pf_taskqueues(ha) != 0) 8138 goto qlnx_iov_init_err0; 8139 8140 cdev = &ha->cdev; 8141 8142 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); 8143 8144 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", 8145 dev, num_vfs, max_vfs); 8146 8147 if (num_vfs >= max_vfs) { 8148 QL_DPRINT1(ha, "Can start at most %d VFs\n", 8149 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); 8150 goto qlnx_iov_init_err0; 8151 } 8152 8153 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, 8154 M_NOWAIT); 8155 8156 if (ha->vf_attr == NULL) 8157 goto qlnx_iov_init_err0; 8158 8159 memset(&params, 0, sizeof(params)); 8160 8161 /* Initialize HW for VF access */ 8162 for_each_hwfn(cdev, j) { 8163 struct ecore_hwfn *hwfn = &cdev->hwfns[j]; 8164 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8165 8166 /* Make sure not to use more than 16 queues per VF */ 8167 params.num_queues = min_t(int, 8168 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 8169 16); 8170 8171 if (!ptt) { 8172 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8173 goto qlnx_iov_init_err1; 8174 } 8175 8176 for (i = 0; i < num_vfs; i++) { 8177 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) 8178 continue; 8179 8180 qlnx_sriov_enable_qid_config(hwfn, i, &params); 8181 8182 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params); 8183 8184 if (ret) { 8185 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); 8186 ecore_ptt_release(hwfn, ptt); 8187 goto qlnx_iov_init_err1; 8188 } 8189 } 8190 8191 ecore_ptt_release(hwfn, ptt); 8192 } 8193 8194 ha->num_vfs = num_vfs; 8195 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); 8196 8197 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs); 8198 8199 return (0); 8200 8201 qlnx_iov_init_err1: 8202 qlnx_sriov_disable(ha); 8203 8204 qlnx_iov_init_err0: 8205 qlnx_destroy_pf_taskqueues(ha); 8206 ha->num_vfs = 0; 8207 8208 return (-1); 8209 } 8210 8211 static void 8212 qlnx_iov_uninit(device_t dev) 8213 { 8214 qlnx_host_t *ha; 8215 8216 if ((ha = device_get_softc(dev)) == NULL) { 8217 device_printf(dev, "%s: cannot get softc\n", __func__); 8218 return; 8219 } 8220 8221 QL_DPRINT2(ha," dev = %p enter\n", dev); 8222 8223 qlnx_sriov_disable(ha); 8224 qlnx_destroy_pf_taskqueues(ha); 8225 8226 free(ha->vf_attr, M_QLNXBUF); 8227 ha->vf_attr = NULL; 8228 8229 ha->num_vfs = 0; 8230 8231 QL_DPRINT2(ha," dev = %p exit\n", dev); 8232 return; 8233 } 8234 8235 static int 8236 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 8237 { 8238 qlnx_host_t *ha; 8239 qlnx_vf_attr_t *vf_attr; 8240 unsigned const char *mac; 8241 size_t size; 8242 struct ecore_hwfn *p_hwfn; 8243 8244 if ((ha = device_get_softc(dev)) == NULL) { 8245 device_printf(dev, "%s: cannot get softc\n", __func__); 8246
return (-1); 8247 } 8248 8249 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8250 8251 if (vfnum > (ha->num_vfs - 1)) { 8252 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8253 vfnum, (ha->num_vfs - 1)); 8254 } 8255 8256 vf_attr = &ha->vf_attr[vfnum]; 8257 8258 if (nvlist_exists_binary(params, "mac-addr")) { 8259 mac = nvlist_get_binary(params, "mac-addr", &size); 8260 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8261 device_printf(dev, 8262 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8263 __func__, vf_attr->mac_addr[0], 8264 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8265 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8266 vf_attr->mac_addr[5]); 8267 p_hwfn = &ha->cdev.hwfns[0]; 8268 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8269 vfnum); 8270 } 8271 8272 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8273 return (0); 8274 } 8275 8276 static void 8277 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8278 { 8279 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8280 struct ecore_ptt *ptt; 8281 int i; 8282 8283 ptt = ecore_ptt_acquire(p_hwfn); 8284 if (!ptt) { 8285 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8286 __qlnx_pf_vf_msg(p_hwfn, 0); 8287 return; 8288 } 8289 8290 ecore_iov_pf_get_pending_events(p_hwfn, events); 8291 8292 QL_DPRINT2(ha, "Event mask of VF events:" 8293 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8294 events[0], events[1], events[2]); 8295 8296 ecore_for_each_vf(p_hwfn, i) { 8297 /* Skip VFs with no pending messages */ 8298 if (!(events[i / 64] & (1ULL << (i % 64)))) 8299 continue; 8300 8301 QL_DPRINT2(ha, 8302 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8303 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8304 8305 /* Copy VF's message to PF's request buffer for that VF */ 8306 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8307 continue; 8308 8309 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8310 } 8311 8312 ecore_ptt_release(p_hwfn, ptt); 8313 8314 return; 8315 } 8316 8317 static void 8318 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8319 { 8320 struct ecore_ptt *ptt; 8321 int ret; 8322 8323 ptt = ecore_ptt_acquire(p_hwfn); 8324 8325 if (!ptt) { 8326 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8327 __qlnx_vf_flr_update(p_hwfn); 8328 return; 8329 } 8330 8331 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8332 8333 if (ret) { 8334 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8335 } 8336 8337 ecore_ptt_release(p_hwfn, ptt); 8338 8339 return; 8340 } 8341 8342 static void 8343 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8344 { 8345 struct ecore_ptt *ptt; 8346 int i; 8347 8348 ptt = ecore_ptt_acquire(p_hwfn); 8349 8350 if (!ptt) { 8351 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8352 qlnx_vf_bulleting_update(p_hwfn); 8353 return; 8354 } 8355 8356 ecore_for_each_vf(p_hwfn, i) { 8357 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8358 p_hwfn, i); 8359 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8360 } 8361 8362 ecore_ptt_release(p_hwfn, ptt); 8363 8364 return; 8365 } 8366 8367 static void 8368 qlnx_pf_taskqueue(void *context, int pending) 8369 { 8370 struct ecore_hwfn *p_hwfn; 8371 qlnx_host_t *ha; 8372 int i; 8373 8374 p_hwfn = context; 8375 8376 if (p_hwfn == NULL) 8377 return; 8378 8379 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8380 8381 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8382 return; 8383 8384 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8385 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) 8386 
qlnx_handle_vf_msg(ha, p_hwfn); 8387 8388 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8389 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) 8390 qlnx_handle_vf_flr_update(ha, p_hwfn); 8391 8392 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8393 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) 8394 qlnx_handle_bulletin_update(ha, p_hwfn); 8395 8396 return; 8397 } 8398 8399 static int 8400 qlnx_create_pf_taskqueues(qlnx_host_t *ha) 8401 { 8402 int i; 8403 uint8_t tq_name[32]; 8404 8405 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8406 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 8407 8408 bzero(tq_name, sizeof (tq_name)); 8409 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); 8410 8411 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); 8412 8413 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 8414 taskqueue_thread_enqueue, 8415 &ha->sriov_task[i].pf_taskqueue); 8416 8417 if (ha->sriov_task[i].pf_taskqueue == NULL) 8418 return (-1); 8419 8420 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, 8421 PI_NET, "%s", tq_name); 8422 8423 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); 8424 } 8425 8426 return (0); 8427 } 8428 8429 static void 8430 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) 8431 { 8432 int i; 8433 8434 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8435 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8436 taskqueue_drain(ha->sriov_task[i].pf_taskqueue, 8437 &ha->sriov_task[i].pf_task); 8438 taskqueue_free(ha->sriov_task[i].pf_taskqueue); 8439 ha->sriov_task[i].pf_taskqueue = NULL; 8440 } 8441 } 8442 return; 8443 } 8444 8445 static void 8446 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) 8447 { 8448 struct ecore_mcp_link_capabilities caps; 8449 struct ecore_mcp_link_params params; 8450 struct ecore_mcp_link_state link; 8451 int i; 8452 8453 if (!p_hwfn->pf_iov_info) 8454 return; 8455 8456 memset(&params, 0, sizeof(struct ecore_mcp_link_params)); 8457 memset(&link, 0, sizeof(struct ecore_mcp_link_state)); 8458 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); 8459 8460 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 8461 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); 8462 memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); 8463 8464 QL_DPRINT2(ha, "called\n"); 8465 8466 /* Update bulletin of all future possible VFs with link configuration */ 8467 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 8468 /* Modify link according to the VF's configured link state */ 8469 8470 link.link_up = false; 8471 8472 if (ha->link_up) { 8473 link.link_up = true; 8474 /* Set speed according to maximum supported by HW. 8475 * that is 40G for regular devices and 100G for CMT 8476 * mode devices. 8477 */ 8478 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 8479 100000 : link.speed; 8480 } 8481 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); 8482 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps); 8483 } 8484 8485 qlnx_vf_bulleting_update(p_hwfn); 8486 8487 return; 8488 } 8489 #endif /* #ifndef QLNX_VF */ 8490 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 8491
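/*
 * Worked example (reference only, hypothetical resource counts):
 * qlnx_sriov_enable_qid_config() above gives each VF a contiguous queue
 * range starting right after the PF's own L2 queues:
 *
 *	base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * num_queues
 *
 * With FEAT_NUM(ECORE_PF_L2_QUE) == 4 and num_queues == 2 per VF, VF 0
 * would use queues 4-5, VF 1 queues 6-7, and so on. Each VF is also
 * assigned its own vport and RSS engine (vfid + 1), since vport 0 and
 * RSS engine 0 are used by the PF itself.
 */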