1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: qlnx_os.c 30 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "qlnx_os.h" 37 #include "bcm_osal.h" 38 #include "reg_addr.h" 39 #include "ecore_gtt_reg_addr.h" 40 #include "ecore.h" 41 #include "ecore_chain.h" 42 #include "ecore_status.h" 43 #include "ecore_hw.h" 44 #include "ecore_rt_defs.h" 45 #include "ecore_init_ops.h" 46 #include "ecore_int.h" 47 #include "ecore_cxt.h" 48 #include "ecore_spq.h" 49 #include "ecore_init_fw_funcs.h" 50 #include "ecore_sp_commands.h" 51 #include "ecore_dev_api.h" 52 #include "ecore_l2_api.h" 53 #include "ecore_mcp.h" 54 #include "ecore_hw_defs.h" 55 #include "mcp_public.h" 56 #include "ecore_iro.h" 57 #include "nvm_cfg.h" 58 #include "ecore_dev_api.h" 59 #include "ecore_dbg_fw_funcs.h" 60 #include "ecore_iov_api.h" 61 #include "ecore_vf_api.h" 62 63 #include "qlnx_ioctl.h" 64 #include "qlnx_def.h" 65 #include "qlnx_ver.h" 66 67 #ifdef QLNX_ENABLE_IWARP 68 #include "qlnx_rdma.h" 69 #endif /* #ifdef QLNX_ENABLE_IWARP */ 70 71 #include <sys/smp.h> 72 73 /* 74 * static functions 75 */ 76 /* 77 * ioctl related functions 78 */ 79 static void qlnx_add_sysctls(qlnx_host_t *ha); 80 81 /* 82 * main driver 83 */ 84 static void qlnx_release(qlnx_host_t *ha); 85 static void qlnx_fp_isr(void *arg); 86 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 87 static void qlnx_init(void *arg); 88 static void qlnx_init_locked(qlnx_host_t *ha); 89 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 90 static int qlnx_set_promisc(qlnx_host_t *ha); 91 static int qlnx_set_allmulti(qlnx_host_t *ha); 92 static int qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); 93 static int qlnx_media_change(struct ifnet *ifp); 94 static void qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); 95 static void qlnx_stop(qlnx_host_t *ha); 96 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 97 struct mbuf **m_headp); 98 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 99 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 100 struct qlnx_link_output 
*if_link); 101 static int qlnx_transmit(struct ifnet *ifp, struct mbuf *mp); 102 static int qlnx_transmit_locked(struct ifnet *ifp, struct qlnx_fastpath *fp, 103 struct mbuf *mp); 104 static void qlnx_qflush(struct ifnet *ifp); 105 106 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 107 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 108 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 109 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 110 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 111 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 112 113 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 114 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 115 116 static int qlnx_nic_setup(struct ecore_dev *cdev, 117 struct ecore_pf_params *func_params); 118 static int qlnx_nic_start(struct ecore_dev *cdev); 119 static int qlnx_slowpath_start(qlnx_host_t *ha); 120 static int qlnx_slowpath_stop(qlnx_host_t *ha); 121 static int qlnx_init_hw(qlnx_host_t *ha); 122 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 123 char ver_str[VER_SIZE]); 124 static void qlnx_unload(qlnx_host_t *ha); 125 static int qlnx_load(qlnx_host_t *ha); 126 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 127 uint32_t add_mac); 128 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 129 uint32_t len); 130 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 131 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 132 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 133 struct qlnx_rx_queue *rxq); 134 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 135 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 136 int hwfn_index); 137 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 138 int hwfn_index); 139 static void qlnx_timer(void *arg); 140 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 141 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 142 static void qlnx_trigger_dump(qlnx_host_t *ha); 143 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 144 struct qlnx_tx_queue *txq); 145 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 146 struct qlnx_tx_queue *txq); 147 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 148 int lro_enable); 149 static void qlnx_fp_taskqueue(void *context, int pending); 150 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 151 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 152 struct qlnx_agg_info *tpa); 153 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 154 155 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 156 157 /* 158 * Hooks to the Operating Systems 159 */ 160 static int qlnx_pci_probe (device_t); 161 static int qlnx_pci_attach (device_t); 162 static int qlnx_pci_detach (device_t); 163 164 #ifndef QLNX_VF 165 166 #ifdef CONFIG_ECORE_SRIOV 167 168 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 169 static void qlnx_iov_uninit(device_t dev); 170 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 171 static void qlnx_initialize_sriov(qlnx_host_t *ha); 172 static void qlnx_pf_taskqueue(void *context, int pending); 173 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha); 174 static void qlnx_destroy_pf_taskqueues(qlnx_host_t 
*ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */

static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxe,1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

#else

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxev,1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx */
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");

/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If that personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and the
 * personality configured via sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured via sysctl.
 */
#define QLNX_PERSONALITY_DEFAULT	0x0	/* use personality in NVRAM */
#define QLNX_PERSONALITY_ETH_ONLY	0x1	/* Override with ETH_ONLY */
#define QLNX_PERSONALITY_ETH_IWARP	0x2	/* Override with ETH_IWARP */
#define QLNX_PERSONALITY_ETH_ROCE	0x3	/* Override with ETH_ROCE */
#define QLNX_PERSONALITY_BITS_PER_FUNC	4
#define QLNX_PERSONALIY_MASK		0xF

/* RDMA configuration; 64bit field allows setting for 16 physical functions */
static uint64_t qlnxe_rdma_configuration = 0x22222222;

SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN,
    &qlnxe_rdma_configuration, 0, "RDMA Configuration");

int
qlnx_vf_device(qlnx_host_t *ha)
{
	uint16_t	device_id;

	device_id = ha->device_id;

	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;

	return -1;
}

static int
qlnx_valid_device(qlnx_host_t *ha)
{
	uint16_t	device_id;

	device_id = ha->device_id;

#ifndef QLNX_VF
	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1644) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return 0;
#else
	if (device_id == QLOGIC_PCI_DEVICE_ID_8090)
		return 0;

#endif /* #ifndef QLNX_VF */
	return -1;
}

#ifdef QLNX_ENABLE_IWARP
static int
qlnx_rdma_supported(struct qlnx_host *ha)
{
	uint16_t	device_id;

	device_id = pci_get_device(ha->pci_dev);

	if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_1654) ||
	    (device_id == QLOGIC_PCI_DEVICE_ID_8070))
		return (0);

	return (-1);
}
#endif /* #ifdef QLNX_ENABLE_IWARP */

/*
 * Name: qlnx_pci_probe
 * Function: Validate that the PCI device is a supported QLogic
 *	QLE45xxx/QLE41xxx Ethernet function
 */
static int
qlnx_pci_probe(device_t dev)
{
	snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d",
		QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD);
	snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx");

	if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) {
		return (ENXIO);
	}

	switch (pci_get_device(dev)) {
#ifndef QLNX_VF

	case QLOGIC_PCI_DEVICE_ID_1644:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1634:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1656:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_1654:
		snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
			"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
			QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
			QLNX_VERSION_BUILD);
		device_set_desc_copy(dev, qlnx_dev_str);

		break;

	case QLOGIC_PCI_DEVICE_ID_8070:
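		/*
		 * Like the device IDs above, this case builds a
		 * human-readable description that embeds the driver version
		 * and hands it to newbus via device_set_desc_copy(); the
		 * probe routine then returns BUS_PROBE_DEFAULT so a more
		 * specific driver could still outbid this one.
		 */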
snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 411 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 412 " Adapter-Ethernet Function", 413 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 414 QLNX_VERSION_BUILD); 415 device_set_desc_copy(dev, qlnx_dev_str); 416 417 break; 418 419 #else 420 case QLOGIC_PCI_DEVICE_ID_8090: 421 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 422 "Qlogic SRIOV PCI CNA (AH) " 423 "Adapter-Ethernet Function", 424 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 425 QLNX_VERSION_BUILD); 426 device_set_desc_copy(dev, qlnx_dev_str); 427 428 break; 429 430 #endif /* #ifndef QLNX_VF */ 431 432 default: 433 return (ENXIO); 434 } 435 436 #ifdef QLNX_ENABLE_IWARP 437 qlnx_rdma_init(); 438 #endif /* #ifdef QLNX_ENABLE_IWARP */ 439 440 return (BUS_PROBE_DEFAULT); 441 } 442 443 static uint16_t 444 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 445 struct qlnx_tx_queue *txq) 446 { 447 u16 hw_bd_cons; 448 u16 ecore_cons_idx; 449 uint16_t diff; 450 451 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 452 453 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 454 if (hw_bd_cons < ecore_cons_idx) { 455 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 456 } else { 457 diff = hw_bd_cons - ecore_cons_idx; 458 } 459 return diff; 460 } 461 462 static void 463 qlnx_sp_intr(void *arg) 464 { 465 struct ecore_hwfn *p_hwfn; 466 qlnx_host_t *ha; 467 int i; 468 469 p_hwfn = arg; 470 471 if (p_hwfn == NULL) { 472 printf("%s: spurious slowpath intr\n", __func__); 473 return; 474 } 475 476 ha = (qlnx_host_t *)p_hwfn->p_dev; 477 478 QL_DPRINT2(ha, "enter\n"); 479 480 for (i = 0; i < ha->cdev.num_hwfns; i++) { 481 if (&ha->cdev.hwfns[i] == p_hwfn) { 482 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 483 break; 484 } 485 } 486 QL_DPRINT2(ha, "exit\n"); 487 488 return; 489 } 490 491 static void 492 qlnx_sp_taskqueue(void *context, int pending) 493 { 494 struct ecore_hwfn *p_hwfn; 495 496 p_hwfn = context; 497 498 if (p_hwfn != NULL) { 499 qlnx_sp_isr(p_hwfn); 500 } 501 return; 502 } 503 504 static int 505 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 506 { 507 int i; 508 uint8_t tq_name[32]; 509 510 for (i = 0; i < ha->cdev.num_hwfns; i++) { 511 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 512 513 bzero(tq_name, sizeof (tq_name)); 514 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 515 516 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 517 518 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 519 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 520 521 if (ha->sp_taskqueue[i] == NULL) 522 return (-1); 523 524 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 525 tq_name); 526 527 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 528 } 529 530 return (0); 531 } 532 533 static void 534 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 535 { 536 int i; 537 538 for (i = 0; i < ha->cdev.num_hwfns; i++) { 539 if (ha->sp_taskqueue[i] != NULL) { 540 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 541 taskqueue_free(ha->sp_taskqueue[i]); 542 } 543 } 544 return; 545 } 546 547 static void 548 qlnx_fp_taskqueue(void *context, int pending) 549 { 550 struct qlnx_fastpath *fp; 551 qlnx_host_t *ha; 552 struct ifnet *ifp; 553 554 fp = context; 555 556 if (fp == NULL) 557 return; 558 559 ha = (qlnx_host_t *)fp->edev; 560 561 ifp = ha->ifp; 562 563 if(ifp->if_drv_flags & IFF_DRV_RUNNING) { 564 if (!drbr_empty(ifp, fp->tx_br)) { 565 if(mtx_trylock(&fp->tx_mtx)) { 566 #ifdef QLNX_TRACE_PERF_DATA 567 tx_pkts = fp->tx_pkts_transmitted; 568 tx_compl = 
fp->tx_pkts_completed; 569 #endif 570 571 qlnx_transmit_locked(ifp, fp, NULL); 572 573 #ifdef QLNX_TRACE_PERF_DATA 574 fp->tx_pkts_trans_fp += 575 (fp->tx_pkts_transmitted - tx_pkts); 576 fp->tx_pkts_compl_fp += 577 (fp->tx_pkts_completed - tx_compl); 578 #endif 579 mtx_unlock(&fp->tx_mtx); 580 } 581 } 582 } 583 584 QL_DPRINT2(ha, "exit \n"); 585 return; 586 } 587 588 static int 589 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 590 { 591 int i; 592 uint8_t tq_name[32]; 593 struct qlnx_fastpath *fp; 594 595 for (i = 0; i < ha->num_rss; i++) { 596 fp = &ha->fp_array[i]; 597 598 bzero(tq_name, sizeof (tq_name)); 599 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 600 601 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 602 603 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 604 taskqueue_thread_enqueue, 605 &fp->fp_taskqueue); 606 607 if (fp->fp_taskqueue == NULL) 608 return (-1); 609 610 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 611 tq_name); 612 613 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 614 } 615 616 return (0); 617 } 618 619 static void 620 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 621 { 622 int i; 623 struct qlnx_fastpath *fp; 624 625 for (i = 0; i < ha->num_rss; i++) { 626 fp = &ha->fp_array[i]; 627 628 if (fp->fp_taskqueue != NULL) { 629 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 630 taskqueue_free(fp->fp_taskqueue); 631 fp->fp_taskqueue = NULL; 632 } 633 } 634 return; 635 } 636 637 static void 638 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 639 { 640 int i; 641 struct qlnx_fastpath *fp; 642 643 for (i = 0; i < ha->num_rss; i++) { 644 fp = &ha->fp_array[i]; 645 646 if (fp->fp_taskqueue != NULL) { 647 QLNX_UNLOCK(ha); 648 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 649 QLNX_LOCK(ha); 650 } 651 } 652 return; 653 } 654 655 static void 656 qlnx_get_params(qlnx_host_t *ha) 657 { 658 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 659 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 660 qlnxe_queue_count); 661 qlnxe_queue_count = 0; 662 } 663 return; 664 } 665 666 static void 667 qlnx_error_recovery_taskqueue(void *context, int pending) 668 { 669 qlnx_host_t *ha; 670 671 ha = context; 672 673 QL_DPRINT2(ha, "enter\n"); 674 675 QLNX_LOCK(ha); 676 qlnx_stop(ha); 677 QLNX_UNLOCK(ha); 678 679 #ifdef QLNX_ENABLE_IWARP 680 qlnx_rdma_dev_remove(ha); 681 #endif /* #ifdef QLNX_ENABLE_IWARP */ 682 683 qlnx_slowpath_stop(ha); 684 qlnx_slowpath_start(ha); 685 686 #ifdef QLNX_ENABLE_IWARP 687 qlnx_rdma_dev_add(ha); 688 #endif /* #ifdef QLNX_ENABLE_IWARP */ 689 690 qlnx_init(ha); 691 692 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 693 694 QL_DPRINT2(ha, "exit\n"); 695 696 return; 697 } 698 699 static int 700 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 701 { 702 uint8_t tq_name[32]; 703 704 bzero(tq_name, sizeof (tq_name)); 705 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 706 707 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 708 709 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 710 taskqueue_thread_enqueue, &ha->err_taskqueue); 711 712 if (ha->err_taskqueue == NULL) 713 return (-1); 714 715 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 716 717 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 718 719 return (0); 720 } 721 722 static void 723 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 724 { 725 if (ha->err_taskqueue != NULL) { 726 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 727 taskqueue_free(ha->err_taskqueue); 728 } 729 730 
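	/*
	 * taskqueue_drain() waits for any queued or running instance of the
	 * error-recovery task to finish before taskqueue_free() tears the
	 * queue down; clearing the pointer below makes the routine
	 * idempotent.  The create/teardown pairing used throughout this
	 * driver is the stock taskqueue(9) lifecycle; a minimal sketch
	 * (names here are illustrative only):
	 *
	 *	TASK_INIT(&task, 0, handler, ctx);
	 *	tq = taskqueue_create("name", M_NOWAIT,
	 *	    taskqueue_thread_enqueue, &tq);
	 *	taskqueue_start_threads(&tq, 1, PI_NET, "name");
	 *	...
	 *	taskqueue_enqueue(tq, &task);	// e.g. from an interrupt
	 *	...
	 *	taskqueue_drain(tq, &task);	// wait for the task to finish
	 *	taskqueue_free(tq);
	 */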
	ha->err_taskqueue = NULL;

	return;
}

/*
 * Name: qlnx_pci_attach
 * Function: attaches the device to the operating system
 */
static int
qlnx_pci_attach(device_t dev)
{
	qlnx_host_t	*ha = NULL;
	uint32_t	rsrc_len_reg __unused = 0;
	uint32_t	rsrc_len_dbells = 0;
	uint32_t	rsrc_len_msix __unused = 0;
	int		i;
	uint32_t	mfw_ver;
	uint32_t	num_sp_msix = 0;
	uint32_t	num_rdma_irqs = 0;

	if ((ha = device_get_softc(dev)) == NULL) {
		device_printf(dev, "cannot get softc\n");
		return (ENOMEM);
	}

	memset(ha, 0, sizeof (qlnx_host_t));

	ha->device_id = pci_get_device(dev);

	if (qlnx_valid_device(ha) != 0) {
		device_printf(dev, "device is not a valid device\n");
		return (ENXIO);
	}
	ha->pci_func = pci_get_function(dev);

	ha->pci_dev = dev;

	mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);

	ha->flags.lock_init = 1;

	pci_enable_busmaster(dev);

	/*
	 * map the PCI BARs
	 */

	ha->reg_rid = PCIR_BAR(0);
	ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid,
				RF_ACTIVE);

	if (ha->pci_reg == NULL) {
		device_printf(dev, "unable to map BAR0\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->reg_rid);

	ha->dbells_rid = PCIR_BAR(2);
	rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev,
					SYS_RES_MEMORY,
					ha->dbells_rid);
	if (rsrc_len_dbells) {
		ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
					&ha->dbells_rid, RF_ACTIVE);

		if (ha->pci_dbells == NULL) {
			device_printf(dev, "unable to map BAR1\n");
			goto qlnx_pci_attach_err;
		}
		ha->dbells_phys_addr = (uint64_t)
			bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid);

		ha->dbells_size = rsrc_len_dbells;
	} else {
		if (qlnx_vf_device(ha) != 0) {
			device_printf(dev, " BAR1 size is zero\n");
			goto qlnx_pci_attach_err;
		}
	}

	ha->msix_rid = PCIR_BAR(4);
	ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			&ha->msix_rid, RF_ACTIVE);

	if (ha->msix_bar == NULL) {
		device_printf(dev, "unable to map BAR2\n");
		goto qlnx_pci_attach_err;
	}

	rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY,
					ha->msix_rid);

	ha->dbg_level = 0x0000;

	QL_DPRINT1(ha, "\n\t\t\t"
		"pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x"
		"\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x"
		"\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x"
		" msix_avail = 0x%x "
		"\n\t\t\t[ncpus = %d]\n",
		ha->pci_dev, ha->pci_reg, rsrc_len_reg,
		ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid,
		ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev),
		mp_ncpus);

	/*
	 * allocate dma tags
	 */

	if (qlnx_alloc_parent_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_tx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_alloc_rx_dma_tag(ha))
		goto qlnx_pci_attach_err;

	if (qlnx_init_hw(ha) != 0)
		goto qlnx_pci_attach_err;

	ha->flags.hw_init = 1;

	qlnx_get_params(ha);

	if ((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) &&
		(qlnxe_queue_count == QLNX_DEFAULT_RSS)) {
		qlnxe_queue_count = QLNX_MAX_RSS;
	}

	/*
	 * Allocate MSI-x vectors
	 */
	if (qlnx_vf_device(ha) != 0) {
		if (qlnxe_queue_count ==
0) 868 ha->num_rss = QLNX_DEFAULT_RSS; 869 else 870 ha->num_rss = qlnxe_queue_count; 871 872 num_sp_msix = ha->cdev.num_hwfns; 873 } else { 874 uint8_t max_rxq; 875 uint8_t max_txq; 876 877 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 878 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 879 880 if (max_rxq < max_txq) 881 ha->num_rss = max_rxq; 882 else 883 ha->num_rss = max_txq; 884 885 if (ha->num_rss > QLNX_MAX_VF_RSS) 886 ha->num_rss = QLNX_MAX_VF_RSS; 887 888 num_sp_msix = 0; 889 } 890 891 if (ha->num_rss > mp_ncpus) 892 ha->num_rss = mp_ncpus; 893 894 ha->num_tc = QLNX_MAX_TC; 895 896 ha->msix_count = pci_msix_count(dev); 897 898 #ifdef QLNX_ENABLE_IWARP 899 900 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 901 902 #endif /* #ifdef QLNX_ENABLE_IWARP */ 903 904 if (!ha->msix_count || 905 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 906 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 907 ha->msix_count); 908 goto qlnx_pci_attach_err; 909 } 910 911 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 912 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 913 else 914 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 915 916 QL_DPRINT1(ha, "\n\t\t\t" 917 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 918 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 919 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 920 " msix_avail = 0x%x msix_alloc = 0x%x" 921 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 922 ha->pci_reg, rsrc_len_reg, 923 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 924 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 925 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 926 927 if (pci_alloc_msix(dev, &ha->msix_count)) { 928 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 929 ha->msix_count); 930 ha->msix_count = 0; 931 goto qlnx_pci_attach_err; 932 } 933 934 /* 935 * Initialize slow path interrupt and task queue 936 */ 937 938 if (num_sp_msix) { 939 if (qlnx_create_sp_taskqueues(ha) != 0) 940 goto qlnx_pci_attach_err; 941 942 for (i = 0; i < ha->cdev.num_hwfns; i++) { 943 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 944 945 ha->sp_irq_rid[i] = i + 1; 946 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 947 &ha->sp_irq_rid[i], 948 (RF_ACTIVE | RF_SHAREABLE)); 949 if (ha->sp_irq[i] == NULL) { 950 device_printf(dev, 951 "could not allocate mbx interrupt\n"); 952 goto qlnx_pci_attach_err; 953 } 954 955 if (bus_setup_intr(dev, ha->sp_irq[i], 956 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 957 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 958 device_printf(dev, 959 "could not setup slow path interrupt\n"); 960 goto qlnx_pci_attach_err; 961 } 962 963 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 964 " sp_irq %p sp_handle %p\n", p_hwfn, 965 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 966 } 967 } 968 969 /* 970 * initialize fast path interrupt 971 */ 972 if (qlnx_create_fp_taskqueues(ha) != 0) 973 goto qlnx_pci_attach_err; 974 975 for (i = 0; i < ha->num_rss; i++) { 976 ha->irq_vec[i].rss_idx = i; 977 ha->irq_vec[i].ha = ha; 978 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; 979 980 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 981 &ha->irq_vec[i].irq_rid, 982 (RF_ACTIVE | RF_SHAREABLE)); 983 984 if (ha->irq_vec[i].irq == NULL) { 985 device_printf(dev, 986 "could not allocate interrupt[%d] irq_rid = %d\n", 987 i, ha->irq_vec[i].irq_rid); 988 goto qlnx_pci_attach_err; 989 } 990 991 if (qlnx_alloc_tx_br(ha, 
&ha->fp_array[i])) { 992 device_printf(dev, "could not allocate tx_br[%d]\n", i); 993 goto qlnx_pci_attach_err; 994 } 995 } 996 997 if (qlnx_vf_device(ha) != 0) { 998 callout_init(&ha->qlnx_callout, 1); 999 ha->flags.callout_init = 1; 1000 1001 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1002 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1003 goto qlnx_pci_attach_err; 1004 if (ha->grcdump_size[i] == 0) 1005 goto qlnx_pci_attach_err; 1006 1007 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1008 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1009 i, ha->grcdump_size[i]); 1010 1011 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1012 if (ha->grcdump[i] == NULL) { 1013 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1014 goto qlnx_pci_attach_err; 1015 } 1016 1017 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1018 goto qlnx_pci_attach_err; 1019 if (ha->idle_chk_size[i] == 0) 1020 goto qlnx_pci_attach_err; 1021 1022 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1023 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1024 i, ha->idle_chk_size[i]); 1025 1026 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1027 1028 if (ha->idle_chk[i] == NULL) { 1029 device_printf(dev, "idle_chk alloc failed\n"); 1030 goto qlnx_pci_attach_err; 1031 } 1032 } 1033 1034 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1035 goto qlnx_pci_attach_err; 1036 } 1037 1038 if (qlnx_slowpath_start(ha) != 0) 1039 goto qlnx_pci_attach_err; 1040 else 1041 ha->flags.slowpath_start = 1; 1042 1043 if (qlnx_vf_device(ha) != 0) { 1044 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1045 qlnx_mdelay(__func__, 1000); 1046 qlnx_trigger_dump(ha); 1047 1048 goto qlnx_pci_attach_err0; 1049 } 1050 1051 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1052 qlnx_mdelay(__func__, 1000); 1053 qlnx_trigger_dump(ha); 1054 1055 goto qlnx_pci_attach_err0; 1056 } 1057 } else { 1058 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1059 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1060 } 1061 1062 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1063 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1064 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1065 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1066 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1067 FW_ENGINEERING_VERSION); 1068 1069 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1070 ha->stormfw_ver, ha->mfw_ver); 1071 1072 qlnx_init_ifnet(dev, ha); 1073 1074 /* 1075 * add sysctls 1076 */ 1077 qlnx_add_sysctls(ha); 1078 1079 qlnx_pci_attach_err0: 1080 /* 1081 * create ioctl device interface 1082 */ 1083 if (qlnx_vf_device(ha) != 0) { 1084 if (qlnx_make_cdev(ha)) { 1085 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1086 goto qlnx_pci_attach_err; 1087 } 1088 1089 #ifdef QLNX_ENABLE_IWARP 1090 qlnx_rdma_dev_add(ha); 1091 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1092 } 1093 1094 #ifndef QLNX_VF 1095 #ifdef CONFIG_ECORE_SRIOV 1096 1097 if (qlnx_vf_device(ha) != 0) 1098 qlnx_initialize_sriov(ha); 1099 1100 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1101 #endif /* #ifdef QLNX_VF */ 1102 1103 QL_DPRINT2(ha, "success\n"); 1104 1105 return (0); 1106 1107 qlnx_pci_attach_err: 1108 1109 qlnx_release(ha); 1110 1111 return (ENXIO); 1112 } 1113 1114 /* 1115 * Name: qlnx_pci_detach 1116 * Function: Unhooks the device from the operating system 1117 */ 1118 static int 1119 qlnx_pci_detach(device_t dev) 1120 { 1121 qlnx_host_t *ha = NULL; 1122 1123 if ((ha = device_get_softc(dev)) == NULL) { 1124 
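		/*
		 * device_get_softc() returns the per-instance softc that
		 * newbus allocated from the sizeof (qlnx_host_t) recorded in
		 * the driver_t above, so for an attached device it should
		 * never be NULL in practice; this check is purely defensive.
		 */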
		device_printf(dev, "%s: cannot get softc\n", __func__);
		return (ENOMEM);
	}

	if (qlnx_vf_device(ha) != 0) {
#ifdef CONFIG_ECORE_SRIOV
		int ret;

		ret = pci_iov_detach(dev);
		if (ret) {
			device_printf(dev, "%s: SRIOV in use\n", __func__);
			return (ret);
		}

#endif /* #ifdef CONFIG_ECORE_SRIOV */

#ifdef QLNX_ENABLE_IWARP
		if (qlnx_rdma_dev_remove(ha) != 0)
			return (EBUSY);
#endif /* #ifdef QLNX_ENABLE_IWARP */
	}

	QLNX_LOCK(ha);
	qlnx_stop(ha);
	QLNX_UNLOCK(ha);

	qlnx_release(ha);

	return (0);
}

#ifdef QLNX_ENABLE_IWARP

static uint8_t
qlnx_get_personality(uint8_t pci_func)
{
	uint8_t personality;

	personality = (qlnxe_rdma_configuration >>
				(pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) &
				QLNX_PERSONALIY_MASK;
	return (personality);
}

static void
qlnx_set_personality(qlnx_host_t *ha)
{
	uint8_t personality;

	personality = qlnx_get_personality(ha->pci_func);

	switch (personality) {
	case QLNX_PERSONALITY_DEFAULT:
		device_printf(ha->pci_dev, "%s: DEFAULT\n",
			__func__);
		ha->personality = ECORE_PCI_DEFAULT;
		break;

	case QLNX_PERSONALITY_ETH_ONLY:
		device_printf(ha->pci_dev, "%s: ETH_ONLY\n",
			__func__);
		ha->personality = ECORE_PCI_ETH;
		break;

	case QLNX_PERSONALITY_ETH_IWARP:
		device_printf(ha->pci_dev, "%s: ETH_IWARP\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_IWARP;
		break;

	case QLNX_PERSONALITY_ETH_ROCE:
		device_printf(ha->pci_dev, "%s: ETH_ROCE\n",
			__func__);
		ha->personality = ECORE_PCI_ETH_ROCE;
		break;
	}

	return;
}

#endif /* #ifdef QLNX_ENABLE_IWARP */

static int
qlnx_init_hw(qlnx_host_t *ha)
{
	int				rval = 0;
	struct ecore_hw_prepare_params	params;

	ecore_init_struct(&ha->cdev);

	/* ha->dp_module = ECORE_MSG_PROBE |
				ECORE_MSG_INTR |
				ECORE_MSG_SP |
				ECORE_MSG_LINK |
				ECORE_MSG_SPQ |
				ECORE_MSG_RDMA;
	ha->dp_level = ECORE_LEVEL_VERBOSE;*/
	//ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2;
	ha->dp_level = ECORE_LEVEL_NOTICE;
	//ha->dp_level = ECORE_LEVEL_VERBOSE;

	ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev);

	ha->cdev.regview = ha->pci_reg;

	ha->personality = ECORE_PCI_DEFAULT;

	if (qlnx_vf_device(ha) == 0) {
		ha->cdev.b_is_vf = true;

		if (ha->pci_dbells != NULL) {
			ha->cdev.doorbells = ha->pci_dbells;
			ha->cdev.db_phys_addr = ha->dbells_phys_addr;
			ha->cdev.db_size = ha->dbells_size;
		} else {
			ha->pci_dbells = ha->pci_reg;
		}
	} else {
		ha->cdev.doorbells = ha->pci_dbells;
		ha->cdev.db_phys_addr = ha->dbells_phys_addr;
		ha->cdev.db_size = ha->dbells_size;

#ifdef QLNX_ENABLE_IWARP

		if (qlnx_rdma_supported(ha) == 0)
			qlnx_set_personality(ha);

#endif /* #ifdef QLNX_ENABLE_IWARP */
	}
	QL_DPRINT2(ha, "%s: %s\n", __func__,
		(ha->personality == ECORE_PCI_ETH_IWARP ?
		"iwarp": "ethernet"));

	bzero(&params, sizeof (struct ecore_hw_prepare_params));

	params.personality = ha->personality;

	params.drv_resc_alloc = false;
	params.chk_reg_fifo = false;
	params.initiate_pf_flr = true;
	params.epoch = 0;

	ecore_hw_prepare(&ha->cdev, &params);

	qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str);

	QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n",
		ha, &ha->cdev, &ha->cdev.hwfns[0]);

	return (rval);
}

static void
qlnx_release(qlnx_host_t *ha)
{
	device_t	dev;
	int		i;

	dev = ha->pci_dev;

	QL_DPRINT2(ha, "enter\n");

	for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) {
		if (ha->idle_chk[i] != NULL) {
			free(ha->idle_chk[i], M_QLNXBUF);
			ha->idle_chk[i] = NULL;
		}

		if (ha->grcdump[i] != NULL) {
			free(ha->grcdump[i], M_QLNXBUF);
			ha->grcdump[i] = NULL;
		}
	}

	if (ha->flags.callout_init)
		callout_drain(&ha->qlnx_callout);

	if (ha->flags.slowpath_start) {
		qlnx_slowpath_stop(ha);
	}

	if (ha->flags.hw_init)
		ecore_hw_remove(&ha->cdev);

	qlnx_del_cdev(ha);

	if (ha->ifp != NULL)
		ether_ifdetach(ha->ifp);

	qlnx_free_tx_dma_tag(ha);

	qlnx_free_rx_dma_tag(ha);

	qlnx_free_parent_dma_tag(ha);

	if (qlnx_vf_device(ha) != 0) {
		qlnx_destroy_error_recovery_taskqueue(ha);
	}

	for (i = 0; i < ha->num_rss; i++) {
		struct qlnx_fastpath *fp = &ha->fp_array[i];

		if (ha->irq_vec[i].handle) {
			(void)bus_teardown_intr(dev, ha->irq_vec[i].irq,
				ha->irq_vec[i].handle);
		}

		if (ha->irq_vec[i].irq) {
			(void)bus_release_resource(dev, SYS_RES_IRQ,
				ha->irq_vec[i].irq_rid,
				ha->irq_vec[i].irq);
		}

		qlnx_free_tx_br(ha, fp);
	}
	qlnx_destroy_fp_taskqueues(ha);

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		if (ha->sp_handle[i])
			(void)bus_teardown_intr(dev, ha->sp_irq[i],
				ha->sp_handle[i]);

		if (ha->sp_irq[i])
			(void) bus_release_resource(dev, SYS_RES_IRQ,
				ha->sp_irq_rid[i], ha->sp_irq[i]);
	}

	qlnx_destroy_sp_taskqueues(ha);

	if (ha->msix_count)
		pci_release_msi(dev);

	if (ha->flags.lock_init) {
		mtx_destroy(&ha->hw_lock);
	}

	if (ha->pci_reg)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid,
				ha->pci_reg);

	if (ha->dbells_size && ha->pci_dbells)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid,
				ha->pci_dbells);

	if (ha->msix_bar)
		(void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid,
				ha->msix_bar);

	QL_DPRINT2(ha, "exit\n");
	return;
}

static void
qlnx_trigger_dump(qlnx_host_t *ha)
{
	int	i;

	if (ha->ifp != NULL)
		ha->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING);

	QL_DPRINT2(ha, "enter\n");

	if (qlnx_vf_device(ha) == 0)
		return;

	ha->error_recovery = 1;

	for (i = 0; i < ha->cdev.num_hwfns; i++) {
		qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i);
		qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i);
	}

	QL_DPRINT2(ha, "exit\n");

	return;
}

static int
qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS)
{
	int		err, ret = 0;
	qlnx_host_t	*ha;

	err = sysctl_handle_int(oidp, &ret, 0, req);

	if (err || !req->newptr)
return (err); 1410 1411 if (ret == 1) { 1412 ha = (qlnx_host_t *)arg1; 1413 qlnx_trigger_dump(ha); 1414 } 1415 return (err); 1416 } 1417 1418 static int 1419 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1420 { 1421 int err, i, ret = 0, usecs = 0; 1422 qlnx_host_t *ha; 1423 struct ecore_hwfn *p_hwfn; 1424 struct qlnx_fastpath *fp; 1425 1426 err = sysctl_handle_int(oidp, &usecs, 0, req); 1427 1428 if (err || !req->newptr || !usecs || (usecs > 255)) 1429 return (err); 1430 1431 ha = (qlnx_host_t *)arg1; 1432 1433 if (qlnx_vf_device(ha) == 0) 1434 return (-1); 1435 1436 for (i = 0; i < ha->num_rss; i++) { 1437 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1438 1439 fp = &ha->fp_array[i]; 1440 1441 if (fp->txq[0]->handle != NULL) { 1442 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1443 (uint16_t)usecs, fp->txq[0]->handle); 1444 } 1445 } 1446 1447 if (!ret) 1448 ha->tx_coalesce_usecs = (uint8_t)usecs; 1449 1450 return (err); 1451 } 1452 1453 static int 1454 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1455 { 1456 int err, i, ret = 0, usecs = 0; 1457 qlnx_host_t *ha; 1458 struct ecore_hwfn *p_hwfn; 1459 struct qlnx_fastpath *fp; 1460 1461 err = sysctl_handle_int(oidp, &usecs, 0, req); 1462 1463 if (err || !req->newptr || !usecs || (usecs > 255)) 1464 return (err); 1465 1466 ha = (qlnx_host_t *)arg1; 1467 1468 if (qlnx_vf_device(ha) == 0) 1469 return (-1); 1470 1471 for (i = 0; i < ha->num_rss; i++) { 1472 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1473 1474 fp = &ha->fp_array[i]; 1475 1476 if (fp->rxq->handle != NULL) { 1477 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1478 0, fp->rxq->handle); 1479 } 1480 } 1481 1482 if (!ret) 1483 ha->rx_coalesce_usecs = (uint8_t)usecs; 1484 1485 return (err); 1486 } 1487 1488 static void 1489 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1490 { 1491 struct sysctl_ctx_list *ctx; 1492 struct sysctl_oid_list *children; 1493 struct sysctl_oid *ctx_oid; 1494 1495 ctx = device_get_sysctl_ctx(ha->pci_dev); 1496 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1497 1498 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1499 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); 1500 children = SYSCTL_CHILDREN(ctx_oid); 1501 1502 SYSCTL_ADD_QUAD(ctx, children, 1503 OID_AUTO, "sp_interrupts", 1504 CTLFLAG_RD, &ha->sp_interrupts, 1505 "No. of slowpath interrupts"); 1506 1507 return; 1508 } 1509 1510 static void 1511 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1512 { 1513 struct sysctl_ctx_list *ctx; 1514 struct sysctl_oid_list *children; 1515 struct sysctl_oid_list *node_children; 1516 struct sysctl_oid *ctx_oid; 1517 int i, j; 1518 uint8_t name_str[16]; 1519 1520 ctx = device_get_sysctl_ctx(ha->pci_dev); 1521 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1522 1523 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1524 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); 1525 children = SYSCTL_CHILDREN(ctx_oid); 1526 1527 for (i = 0; i < ha->num_rss; i++) { 1528 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1529 snprintf(name_str, sizeof(name_str), "%d", i); 1530 1531 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1532 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); 1533 node_children = SYSCTL_CHILDREN(ctx_oid); 1534 1535 /* Tx Related */ 1536 1537 SYSCTL_ADD_QUAD(ctx, node_children, 1538 OID_AUTO, "tx_pkts_processed", 1539 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1540 "No. 
of packets processed for transmission"); 1541 1542 SYSCTL_ADD_QUAD(ctx, node_children, 1543 OID_AUTO, "tx_pkts_freed", 1544 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1545 "No. of freed packets"); 1546 1547 SYSCTL_ADD_QUAD(ctx, node_children, 1548 OID_AUTO, "tx_pkts_transmitted", 1549 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1550 "No. of transmitted packets"); 1551 1552 SYSCTL_ADD_QUAD(ctx, node_children, 1553 OID_AUTO, "tx_pkts_completed", 1554 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1555 "No. of transmit completions"); 1556 1557 SYSCTL_ADD_QUAD(ctx, node_children, 1558 OID_AUTO, "tx_non_tso_pkts", 1559 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1560 "No. of non LSO transmited packets"); 1561 1562 #ifdef QLNX_TRACE_PERF_DATA 1563 1564 SYSCTL_ADD_QUAD(ctx, node_children, 1565 OID_AUTO, "tx_pkts_trans_ctx", 1566 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1567 "No. of transmitted packets in transmit context"); 1568 1569 SYSCTL_ADD_QUAD(ctx, node_children, 1570 OID_AUTO, "tx_pkts_compl_ctx", 1571 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1572 "No. of transmit completions in transmit context"); 1573 1574 SYSCTL_ADD_QUAD(ctx, node_children, 1575 OID_AUTO, "tx_pkts_trans_fp", 1576 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1577 "No. of transmitted packets in taskqueue"); 1578 1579 SYSCTL_ADD_QUAD(ctx, node_children, 1580 OID_AUTO, "tx_pkts_compl_fp", 1581 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1582 "No. of transmit completions in taskqueue"); 1583 1584 SYSCTL_ADD_QUAD(ctx, node_children, 1585 OID_AUTO, "tx_pkts_compl_intr", 1586 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1587 "No. of transmit completions in interrupt ctx"); 1588 #endif 1589 1590 SYSCTL_ADD_QUAD(ctx, node_children, 1591 OID_AUTO, "tx_tso_pkts", 1592 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1593 "No. 
of LSO transmited packets"); 1594 1595 SYSCTL_ADD_QUAD(ctx, node_children, 1596 OID_AUTO, "tx_lso_wnd_min_len", 1597 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1598 "tx_lso_wnd_min_len"); 1599 1600 SYSCTL_ADD_QUAD(ctx, node_children, 1601 OID_AUTO, "tx_defrag", 1602 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1603 "tx_defrag"); 1604 1605 SYSCTL_ADD_QUAD(ctx, node_children, 1606 OID_AUTO, "tx_nsegs_gt_elem_left", 1607 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1608 "tx_nsegs_gt_elem_left"); 1609 1610 SYSCTL_ADD_UINT(ctx, node_children, 1611 OID_AUTO, "tx_tso_max_nsegs", 1612 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1613 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1614 1615 SYSCTL_ADD_UINT(ctx, node_children, 1616 OID_AUTO, "tx_tso_min_nsegs", 1617 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1618 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1619 1620 SYSCTL_ADD_UINT(ctx, node_children, 1621 OID_AUTO, "tx_tso_max_pkt_len", 1622 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1623 ha->fp_array[i].tx_tso_max_pkt_len, 1624 "tx_tso_max_pkt_len"); 1625 1626 SYSCTL_ADD_UINT(ctx, node_children, 1627 OID_AUTO, "tx_tso_min_pkt_len", 1628 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1629 ha->fp_array[i].tx_tso_min_pkt_len, 1630 "tx_tso_min_pkt_len"); 1631 1632 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1633 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1634 snprintf(name_str, sizeof(name_str), 1635 "tx_pkts_nseg_%02d", (j+1)); 1636 1637 SYSCTL_ADD_QUAD(ctx, node_children, 1638 OID_AUTO, name_str, CTLFLAG_RD, 1639 &ha->fp_array[i].tx_pkts[j], name_str); 1640 } 1641 1642 #ifdef QLNX_TRACE_PERF_DATA 1643 for (j = 0; j < 18; j++) { 1644 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1645 snprintf(name_str, sizeof(name_str), 1646 "tx_pkts_hist_%02d", (j+1)); 1647 1648 SYSCTL_ADD_QUAD(ctx, node_children, 1649 OID_AUTO, name_str, CTLFLAG_RD, 1650 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1651 } 1652 for (j = 0; j < 5; j++) { 1653 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1654 snprintf(name_str, sizeof(name_str), 1655 "tx_comInt_%02d", (j+1)); 1656 1657 SYSCTL_ADD_QUAD(ctx, node_children, 1658 OID_AUTO, name_str, CTLFLAG_RD, 1659 &ha->fp_array[i].tx_comInt[j], name_str); 1660 } 1661 for (j = 0; j < 18; j++) { 1662 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1663 snprintf(name_str, sizeof(name_str), 1664 "tx_pkts_q_%02d", (j+1)); 1665 1666 SYSCTL_ADD_QUAD(ctx, node_children, 1667 OID_AUTO, name_str, CTLFLAG_RD, 1668 &ha->fp_array[i].tx_pkts_q[j], name_str); 1669 } 1670 #endif 1671 1672 SYSCTL_ADD_QUAD(ctx, node_children, 1673 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1674 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1675 "err_tx_nsegs_gt_elem_left"); 1676 1677 SYSCTL_ADD_QUAD(ctx, node_children, 1678 OID_AUTO, "err_tx_dmamap_create", 1679 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1680 "err_tx_dmamap_create"); 1681 1682 SYSCTL_ADD_QUAD(ctx, node_children, 1683 OID_AUTO, "err_tx_defrag_dmamap_load", 1684 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1685 "err_tx_defrag_dmamap_load"); 1686 1687 SYSCTL_ADD_QUAD(ctx, node_children, 1688 OID_AUTO, "err_tx_non_tso_max_seg", 1689 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1690 "err_tx_non_tso_max_seg"); 1691 1692 SYSCTL_ADD_QUAD(ctx, node_children, 1693 OID_AUTO, "err_tx_dmamap_load", 1694 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1695 "err_tx_dmamap_load"); 1696 1697 SYSCTL_ADD_QUAD(ctx, node_children, 1698 OID_AUTO, "err_tx_defrag", 1699 
CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1700 "err_tx_defrag"); 1701 1702 SYSCTL_ADD_QUAD(ctx, node_children, 1703 OID_AUTO, "err_tx_free_pkt_null", 1704 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1705 "err_tx_free_pkt_null"); 1706 1707 SYSCTL_ADD_QUAD(ctx, node_children, 1708 OID_AUTO, "err_tx_cons_idx_conflict", 1709 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1710 "err_tx_cons_idx_conflict"); 1711 1712 SYSCTL_ADD_QUAD(ctx, node_children, 1713 OID_AUTO, "lro_cnt_64", 1714 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1715 "lro_cnt_64"); 1716 1717 SYSCTL_ADD_QUAD(ctx, node_children, 1718 OID_AUTO, "lro_cnt_128", 1719 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1720 "lro_cnt_128"); 1721 1722 SYSCTL_ADD_QUAD(ctx, node_children, 1723 OID_AUTO, "lro_cnt_256", 1724 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1725 "lro_cnt_256"); 1726 1727 SYSCTL_ADD_QUAD(ctx, node_children, 1728 OID_AUTO, "lro_cnt_512", 1729 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1730 "lro_cnt_512"); 1731 1732 SYSCTL_ADD_QUAD(ctx, node_children, 1733 OID_AUTO, "lro_cnt_1024", 1734 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1735 "lro_cnt_1024"); 1736 1737 /* Rx Related */ 1738 1739 SYSCTL_ADD_QUAD(ctx, node_children, 1740 OID_AUTO, "rx_pkts", 1741 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1742 "No. of received packets"); 1743 1744 SYSCTL_ADD_QUAD(ctx, node_children, 1745 OID_AUTO, "tpa_start", 1746 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1747 "No. of tpa_start packets"); 1748 1749 SYSCTL_ADD_QUAD(ctx, node_children, 1750 OID_AUTO, "tpa_cont", 1751 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1752 "No. of tpa_cont packets"); 1753 1754 SYSCTL_ADD_QUAD(ctx, node_children, 1755 OID_AUTO, "tpa_end", 1756 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1757 "No. of tpa_end packets"); 1758 1759 SYSCTL_ADD_QUAD(ctx, node_children, 1760 OID_AUTO, "err_m_getcl", 1761 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1762 "err_m_getcl"); 1763 1764 SYSCTL_ADD_QUAD(ctx, node_children, 1765 OID_AUTO, "err_m_getjcl", 1766 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1767 "err_m_getjcl"); 1768 1769 SYSCTL_ADD_QUAD(ctx, node_children, 1770 OID_AUTO, "err_rx_hw_errors", 1771 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1772 "err_rx_hw_errors"); 1773 1774 SYSCTL_ADD_QUAD(ctx, node_children, 1775 OID_AUTO, "err_rx_alloc_errors", 1776 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1777 "err_rx_alloc_errors"); 1778 } 1779 1780 return; 1781 } 1782 1783 static void 1784 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1785 { 1786 struct sysctl_ctx_list *ctx; 1787 struct sysctl_oid_list *children; 1788 struct sysctl_oid *ctx_oid; 1789 1790 ctx = device_get_sysctl_ctx(ha->pci_dev); 1791 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1792 1793 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1794 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); 1795 children = SYSCTL_CHILDREN(ctx_oid); 1796 1797 SYSCTL_ADD_QUAD(ctx, children, 1798 OID_AUTO, "no_buff_discards", 1799 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1800 "No. of packets discarded due to lack of buffer"); 1801 1802 SYSCTL_ADD_QUAD(ctx, children, 1803 OID_AUTO, "packet_too_big_discard", 1804 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1805 "No. 
of packets discarded because packet was too big"); 1806 1807 SYSCTL_ADD_QUAD(ctx, children, 1808 OID_AUTO, "ttl0_discard", 1809 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1810 "ttl0_discard"); 1811 1812 SYSCTL_ADD_QUAD(ctx, children, 1813 OID_AUTO, "rx_ucast_bytes", 1814 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1815 "rx_ucast_bytes"); 1816 1817 SYSCTL_ADD_QUAD(ctx, children, 1818 OID_AUTO, "rx_mcast_bytes", 1819 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1820 "rx_mcast_bytes"); 1821 1822 SYSCTL_ADD_QUAD(ctx, children, 1823 OID_AUTO, "rx_bcast_bytes", 1824 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1825 "rx_bcast_bytes"); 1826 1827 SYSCTL_ADD_QUAD(ctx, children, 1828 OID_AUTO, "rx_ucast_pkts", 1829 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1830 "rx_ucast_pkts"); 1831 1832 SYSCTL_ADD_QUAD(ctx, children, 1833 OID_AUTO, "rx_mcast_pkts", 1834 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1835 "rx_mcast_pkts"); 1836 1837 SYSCTL_ADD_QUAD(ctx, children, 1838 OID_AUTO, "rx_bcast_pkts", 1839 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1840 "rx_bcast_pkts"); 1841 1842 SYSCTL_ADD_QUAD(ctx, children, 1843 OID_AUTO, "mftag_filter_discards", 1844 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1845 "mftag_filter_discards"); 1846 1847 SYSCTL_ADD_QUAD(ctx, children, 1848 OID_AUTO, "mac_filter_discards", 1849 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1850 "mac_filter_discards"); 1851 1852 SYSCTL_ADD_QUAD(ctx, children, 1853 OID_AUTO, "tx_ucast_bytes", 1854 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1855 "tx_ucast_bytes"); 1856 1857 SYSCTL_ADD_QUAD(ctx, children, 1858 OID_AUTO, "tx_mcast_bytes", 1859 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1860 "tx_mcast_bytes"); 1861 1862 SYSCTL_ADD_QUAD(ctx, children, 1863 OID_AUTO, "tx_bcast_bytes", 1864 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1865 "tx_bcast_bytes"); 1866 1867 SYSCTL_ADD_QUAD(ctx, children, 1868 OID_AUTO, "tx_ucast_pkts", 1869 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1870 "tx_ucast_pkts"); 1871 1872 SYSCTL_ADD_QUAD(ctx, children, 1873 OID_AUTO, "tx_mcast_pkts", 1874 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1875 "tx_mcast_pkts"); 1876 1877 SYSCTL_ADD_QUAD(ctx, children, 1878 OID_AUTO, "tx_bcast_pkts", 1879 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1880 "tx_bcast_pkts"); 1881 1882 SYSCTL_ADD_QUAD(ctx, children, 1883 OID_AUTO, "tx_err_drop_pkts", 1884 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1885 "tx_err_drop_pkts"); 1886 1887 SYSCTL_ADD_QUAD(ctx, children, 1888 OID_AUTO, "tpa_coalesced_pkts", 1889 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1890 "tpa_coalesced_pkts"); 1891 1892 SYSCTL_ADD_QUAD(ctx, children, 1893 OID_AUTO, "tpa_coalesced_events", 1894 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1895 "tpa_coalesced_events"); 1896 1897 SYSCTL_ADD_QUAD(ctx, children, 1898 OID_AUTO, "tpa_aborts_num", 1899 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1900 "tpa_aborts_num"); 1901 1902 SYSCTL_ADD_QUAD(ctx, children, 1903 OID_AUTO, "tpa_not_coalesced_pkts", 1904 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1905 "tpa_not_coalesced_pkts"); 1906 1907 SYSCTL_ADD_QUAD(ctx, children, 1908 OID_AUTO, "tpa_coalesced_bytes", 1909 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1910 "tpa_coalesced_bytes"); 1911 1912 SYSCTL_ADD_QUAD(ctx, children, 1913 OID_AUTO, "rx_64_byte_packets", 1914 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1915 "rx_64_byte_packets"); 1916 1917 SYSCTL_ADD_QUAD(ctx, children, 1918 OID_AUTO, 
"rx_65_to_127_byte_packets", 1919 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1920 "rx_65_to_127_byte_packets"); 1921 1922 SYSCTL_ADD_QUAD(ctx, children, 1923 OID_AUTO, "rx_128_to_255_byte_packets", 1924 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1925 "rx_128_to_255_byte_packets"); 1926 1927 SYSCTL_ADD_QUAD(ctx, children, 1928 OID_AUTO, "rx_256_to_511_byte_packets", 1929 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1930 "rx_256_to_511_byte_packets"); 1931 1932 SYSCTL_ADD_QUAD(ctx, children, 1933 OID_AUTO, "rx_512_to_1023_byte_packets", 1934 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1935 "rx_512_to_1023_byte_packets"); 1936 1937 SYSCTL_ADD_QUAD(ctx, children, 1938 OID_AUTO, "rx_1024_to_1518_byte_packets", 1939 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1940 "rx_1024_to_1518_byte_packets"); 1941 1942 SYSCTL_ADD_QUAD(ctx, children, 1943 OID_AUTO, "rx_1519_to_1522_byte_packets", 1944 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1945 "rx_1519_to_1522_byte_packets"); 1946 1947 SYSCTL_ADD_QUAD(ctx, children, 1948 OID_AUTO, "rx_1523_to_2047_byte_packets", 1949 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1950 "rx_1523_to_2047_byte_packets"); 1951 1952 SYSCTL_ADD_QUAD(ctx, children, 1953 OID_AUTO, "rx_2048_to_4095_byte_packets", 1954 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1955 "rx_2048_to_4095_byte_packets"); 1956 1957 SYSCTL_ADD_QUAD(ctx, children, 1958 OID_AUTO, "rx_4096_to_9216_byte_packets", 1959 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1960 "rx_4096_to_9216_byte_packets"); 1961 1962 SYSCTL_ADD_QUAD(ctx, children, 1963 OID_AUTO, "rx_9217_to_16383_byte_packets", 1964 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1965 "rx_9217_to_16383_byte_packets"); 1966 1967 SYSCTL_ADD_QUAD(ctx, children, 1968 OID_AUTO, "rx_crc_errors", 1969 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1970 "rx_crc_errors"); 1971 1972 SYSCTL_ADD_QUAD(ctx, children, 1973 OID_AUTO, "rx_mac_crtl_frames", 1974 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1975 "rx_mac_crtl_frames"); 1976 1977 SYSCTL_ADD_QUAD(ctx, children, 1978 OID_AUTO, "rx_pause_frames", 1979 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1980 "rx_pause_frames"); 1981 1982 SYSCTL_ADD_QUAD(ctx, children, 1983 OID_AUTO, "rx_pfc_frames", 1984 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1985 "rx_pfc_frames"); 1986 1987 SYSCTL_ADD_QUAD(ctx, children, 1988 OID_AUTO, "rx_align_errors", 1989 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1990 "rx_align_errors"); 1991 1992 SYSCTL_ADD_QUAD(ctx, children, 1993 OID_AUTO, "rx_carrier_errors", 1994 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1995 "rx_carrier_errors"); 1996 1997 SYSCTL_ADD_QUAD(ctx, children, 1998 OID_AUTO, "rx_oversize_packets", 1999 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2000 "rx_oversize_packets"); 2001 2002 SYSCTL_ADD_QUAD(ctx, children, 2003 OID_AUTO, "rx_jabbers", 2004 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2005 "rx_jabbers"); 2006 2007 SYSCTL_ADD_QUAD(ctx, children, 2008 OID_AUTO, "rx_undersize_packets", 2009 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2010 "rx_undersize_packets"); 2011 2012 SYSCTL_ADD_QUAD(ctx, children, 2013 OID_AUTO, "rx_fragments", 2014 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2015 "rx_fragments"); 2016 2017 SYSCTL_ADD_QUAD(ctx, children, 2018 OID_AUTO, "tx_64_byte_packets", 2019 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2020 
"tx_64_byte_packets"); 2021 2022 SYSCTL_ADD_QUAD(ctx, children, 2023 OID_AUTO, "tx_65_to_127_byte_packets", 2024 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2025 "tx_65_to_127_byte_packets"); 2026 2027 SYSCTL_ADD_QUAD(ctx, children, 2028 OID_AUTO, "tx_128_to_255_byte_packets", 2029 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2030 "tx_128_to_255_byte_packets"); 2031 2032 SYSCTL_ADD_QUAD(ctx, children, 2033 OID_AUTO, "tx_256_to_511_byte_packets", 2034 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2035 "tx_256_to_511_byte_packets"); 2036 2037 SYSCTL_ADD_QUAD(ctx, children, 2038 OID_AUTO, "tx_512_to_1023_byte_packets", 2039 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2040 "tx_512_to_1023_byte_packets"); 2041 2042 SYSCTL_ADD_QUAD(ctx, children, 2043 OID_AUTO, "tx_1024_to_1518_byte_packets", 2044 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2045 "tx_1024_to_1518_byte_packets"); 2046 2047 SYSCTL_ADD_QUAD(ctx, children, 2048 OID_AUTO, "tx_1519_to_2047_byte_packets", 2049 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2050 "tx_1519_to_2047_byte_packets"); 2051 2052 SYSCTL_ADD_QUAD(ctx, children, 2053 OID_AUTO, "tx_2048_to_4095_byte_packets", 2054 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2055 "tx_2048_to_4095_byte_packets"); 2056 2057 SYSCTL_ADD_QUAD(ctx, children, 2058 OID_AUTO, "tx_4096_to_9216_byte_packets", 2059 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2060 "tx_4096_to_9216_byte_packets"); 2061 2062 SYSCTL_ADD_QUAD(ctx, children, 2063 OID_AUTO, "tx_9217_to_16383_byte_packets", 2064 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2065 "tx_9217_to_16383_byte_packets"); 2066 2067 SYSCTL_ADD_QUAD(ctx, children, 2068 OID_AUTO, "tx_pause_frames", 2069 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2070 "tx_pause_frames"); 2071 2072 SYSCTL_ADD_QUAD(ctx, children, 2073 OID_AUTO, "tx_pfc_frames", 2074 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2075 "tx_pfc_frames"); 2076 2077 SYSCTL_ADD_QUAD(ctx, children, 2078 OID_AUTO, "tx_lpi_entry_count", 2079 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2080 "tx_lpi_entry_count"); 2081 2082 SYSCTL_ADD_QUAD(ctx, children, 2083 OID_AUTO, "tx_total_collisions", 2084 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2085 "tx_total_collisions"); 2086 2087 SYSCTL_ADD_QUAD(ctx, children, 2088 OID_AUTO, "brb_truncates", 2089 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2090 "brb_truncates"); 2091 2092 SYSCTL_ADD_QUAD(ctx, children, 2093 OID_AUTO, "brb_discards", 2094 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2095 "brb_discards"); 2096 2097 SYSCTL_ADD_QUAD(ctx, children, 2098 OID_AUTO, "rx_mac_bytes", 2099 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2100 "rx_mac_bytes"); 2101 2102 SYSCTL_ADD_QUAD(ctx, children, 2103 OID_AUTO, "rx_mac_uc_packets", 2104 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2105 "rx_mac_uc_packets"); 2106 2107 SYSCTL_ADD_QUAD(ctx, children, 2108 OID_AUTO, "rx_mac_mc_packets", 2109 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2110 "rx_mac_mc_packets"); 2111 2112 SYSCTL_ADD_QUAD(ctx, children, 2113 OID_AUTO, "rx_mac_bc_packets", 2114 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2115 "rx_mac_bc_packets"); 2116 2117 SYSCTL_ADD_QUAD(ctx, children, 2118 OID_AUTO, "rx_mac_frames_ok", 2119 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2120 "rx_mac_frames_ok"); 2121 2122 SYSCTL_ADD_QUAD(ctx, children, 2123 OID_AUTO, "tx_mac_bytes", 2124 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2125 "tx_mac_bytes"); 2126 2127 SYSCTL_ADD_QUAD(ctx, children, 2128 OID_AUTO, "tx_mac_uc_packets", 2129 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2130 "tx_mac_uc_packets"); 2131 2132 SYSCTL_ADD_QUAD(ctx, children, 2133 OID_AUTO, "tx_mac_mc_packets", 2134 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2135 "tx_mac_mc_packets"); 2136 2137 SYSCTL_ADD_QUAD(ctx, children, 2138 OID_AUTO, "tx_mac_bc_packets", 2139 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2140 "tx_mac_bc_packets"); 2141 2142 SYSCTL_ADD_QUAD(ctx, children, 2143 OID_AUTO, "tx_mac_ctrl_frames", 2144 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2145 "tx_mac_ctrl_frames"); 2146 return; 2147 } 2148 2149 static void 2150 qlnx_add_sysctls(qlnx_host_t *ha) 2151 { 2152 device_t dev = ha->pci_dev; 2153 struct sysctl_ctx_list *ctx; 2154 struct sysctl_oid_list *children; 2155 2156 ctx = device_get_sysctl_ctx(dev); 2157 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2158 2159 qlnx_add_fp_stats_sysctls(ha); 2160 qlnx_add_sp_stats_sysctls(ha); 2161 2162 if (qlnx_vf_device(ha) != 0) 2163 qlnx_add_hw_stats_sysctls(ha); 2164 2165 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2166 CTLFLAG_RD, qlnx_ver_str, 0, 2167 "Driver Version"); 2168 2169 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2170 CTLFLAG_RD, ha->stormfw_ver, 0, 2171 "STORM Firmware Version"); 2172 2173 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2174 CTLFLAG_RD, ha->mfw_ver, 0, 2175 "Management Firmware Version"); 2176 2177 SYSCTL_ADD_UINT(ctx, children, 2178 OID_AUTO, "personality", CTLFLAG_RD, 2179 &ha->personality, ha->personality, 2180 "\tpersonality = 0 => Ethernet Only\n" 2181 "\tpersonality = 3 => Ethernet and RoCE\n" 2182 "\tpersonality = 4 => Ethernet and iWARP\n" 2183 "\tpersonality = 6 => Default in Shared Memory\n"); 2184 2185 ha->dbg_level = 0; 2186 SYSCTL_ADD_UINT(ctx, children, 2187 OID_AUTO, "debug", CTLFLAG_RW, 2188 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2189 2190 ha->dp_level = 0x01; 2191 SYSCTL_ADD_UINT(ctx, children, 2192 OID_AUTO, "dp_level", CTLFLAG_RW, 2193 &ha->dp_level, ha->dp_level, "DP Level"); 2194 2195 ha->dbg_trace_lro_cnt = 0; 2196 SYSCTL_ADD_UINT(ctx, children, 2197 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2198 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2199 "Trace LRO Counts"); 2200 2201 ha->dbg_trace_tso_pkt_len = 0; 2202 SYSCTL_ADD_UINT(ctx, children, 2203 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2204 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2205 "Trace TSO packet lengths"); 2206 2207 ha->dp_module = 0; 2208 SYSCTL_ADD_UINT(ctx, children, 2209 OID_AUTO, "dp_module", CTLFLAG_RW, 2210 &ha->dp_module, ha->dp_module, "DP Module"); 2211 2212 ha->err_inject = 0; 2213 2214 SYSCTL_ADD_UINT(ctx, children, 2215 OID_AUTO, "err_inject", CTLFLAG_RW, 2216 &ha->err_inject, ha->err_inject, "Error Inject"); 2217 2218 ha->storm_stats_enable = 0; 2219 2220 SYSCTL_ADD_UINT(ctx, children, 2221 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2222 &ha->storm_stats_enable, ha->storm_stats_enable, 2223 "Enable Storm Statistics Gathering"); 2224 2225 ha->storm_stats_index = 0; 2226 2227 SYSCTL_ADD_UINT(ctx, children, 2228 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2229 &ha->storm_stats_index, ha->storm_stats_index, 2230 "Enable Storm Statistics Gathering Current Index"); 2231 2232 ha->grcdump_taken = 0; 2233 SYSCTL_ADD_UINT(ctx, children, 2234 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2235 &ha->grcdump_taken, ha->grcdump_taken, 
2236 "grcdump_taken"); 2237 2238 ha->idle_chk_taken = 0; 2239 SYSCTL_ADD_UINT(ctx, children, 2240 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2241 &ha->idle_chk_taken, ha->idle_chk_taken, 2242 "idle_chk_taken"); 2243 2244 SYSCTL_ADD_UINT(ctx, children, 2245 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2246 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2247 "rx_coalesce_usecs"); 2248 2249 SYSCTL_ADD_UINT(ctx, children, 2250 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2251 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2252 "tx_coalesce_usecs"); 2253 2254 SYSCTL_ADD_PROC(ctx, children, 2255 OID_AUTO, "trigger_dump", 2256 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2257 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2258 2259 SYSCTL_ADD_PROC(ctx, children, 2260 OID_AUTO, "set_rx_coalesce_usecs", 2261 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2262 (void *)ha, 0, qlnx_set_rx_coalesce, "I", 2263 "rx interrupt coalesce period microseconds"); 2264 2265 SYSCTL_ADD_PROC(ctx, children, 2266 OID_AUTO, "set_tx_coalesce_usecs", 2267 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2268 (void *)ha, 0, qlnx_set_tx_coalesce, "I", 2269 "tx interrupt coalesce period microseconds"); 2270 2271 ha->rx_pkt_threshold = 128; 2272 SYSCTL_ADD_UINT(ctx, children, 2273 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2274 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2275 "No. of Rx Pkts to process at a time"); 2276 2277 ha->rx_jumbo_buf_eq_mtu = 0; 2278 SYSCTL_ADD_UINT(ctx, children, 2279 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2280 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2281 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2282 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2283 2284 SYSCTL_ADD_QUAD(ctx, children, 2285 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2286 &ha->err_illegal_intr, "err_illegal_intr"); 2287 2288 SYSCTL_ADD_QUAD(ctx, children, 2289 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2290 &ha->err_fp_null, "err_fp_null"); 2291 2292 SYSCTL_ADD_QUAD(ctx, children, 2293 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2294 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2295 return; 2296 } 2297 2298 /***************************************************************************** 2299 * Operating System Network Interface Functions 2300 *****************************************************************************/ 2301 2302 static void 2303 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2304 { 2305 uint16_t device_id; 2306 struct ifnet *ifp; 2307 2308 ifp = ha->ifp = if_alloc(IFT_ETHER); 2309 2310 if (ifp == NULL) 2311 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2312 2313 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2314 2315 device_id = pci_get_device(ha->pci_dev); 2316 2317 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2318 ifp->if_baudrate = IF_Gbps(40); 2319 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2320 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2321 ifp->if_baudrate = IF_Gbps(25); 2322 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2323 ifp->if_baudrate = IF_Gbps(50); 2324 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2325 ifp->if_baudrate = IF_Gbps(100); 2326 2327 ifp->if_capabilities = IFCAP_LINKSTATE; 2328 2329 ifp->if_init = qlnx_init; 2330 ifp->if_softc = ha; 2331 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2332 ifp->if_ioctl = qlnx_ioctl; 2333 ifp->if_transmit = qlnx_transmit; 2334 ifp->if_qflush = qlnx_qflush; 2335 2336 IFQ_SET_MAXLEN(&ifp->if_snd, qlnx_get_ifq_snd_maxlen(ha)); 2337 
ifp->if_snd.ifq_drv_maxlen = qlnx_get_ifq_snd_maxlen(ha); 2338 IFQ_SET_READY(&ifp->if_snd); 2339 2340 if_setgetcounterfn(ifp, qlnx_get_counter); 2341 2342 ha->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2343 2344 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2345 2346 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2347 !ha->primary_mac[2] && !ha->primary_mac[3] && 2348 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2349 uint32_t rnd; 2350 2351 rnd = arc4random(); 2352 2353 ha->primary_mac[0] = 0x00; 2354 ha->primary_mac[1] = 0x0e; 2355 ha->primary_mac[2] = 0x1e; 2356 ha->primary_mac[3] = rnd & 0xFF; 2357 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2358 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2359 } 2360 2361 ether_ifattach(ifp, ha->primary_mac); 2362 bcopy(IF_LLADDR(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2363 2364 ifp->if_capabilities = IFCAP_HWCSUM; 2365 ifp->if_capabilities |= IFCAP_JUMBO_MTU; 2366 2367 ifp->if_capabilities |= IFCAP_VLAN_MTU; 2368 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 2369 ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; 2370 ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; 2371 ifp->if_capabilities |= IFCAP_VLAN_HWTSO; 2372 ifp->if_capabilities |= IFCAP_TSO4; 2373 ifp->if_capabilities |= IFCAP_TSO6; 2374 ifp->if_capabilities |= IFCAP_LRO; 2375 2376 ifp->if_hw_tsomax = QLNX_MAX_TSO_FRAME_SIZE - 2377 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); 2378 ifp->if_hw_tsomaxsegcount = QLNX_MAX_SEGMENTS - 1 /* hdr */; 2379 ifp->if_hw_tsomaxsegsize = QLNX_MAX_TX_MBUF_SIZE; 2380 2381 ifp->if_capenable = ifp->if_capabilities; 2382 2383 ifp->if_hwassist = CSUM_IP; 2384 ifp->if_hwassist |= CSUM_TCP | CSUM_UDP; 2385 ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; 2386 ifp->if_hwassist |= CSUM_TSO; 2387 2388 ifp->if_hdrlen = sizeof(struct ether_vlan_header); 2389 2390 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2391 qlnx_media_status); 2392 2393 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2394 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2395 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2396 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2397 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2398 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2399 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2400 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2401 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2402 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2403 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2404 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2405 ifmedia_add(&ha->media, 2406 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2407 ifmedia_add(&ha->media, 2408 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2409 ifmedia_add(&ha->media, 2410 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2411 } 2412 2413 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2414 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2415 2416 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2417 2418 QL_DPRINT2(ha, "exit\n"); 2419 2420 return; 2421 } 2422 2423 static void 2424 qlnx_init_locked(qlnx_host_t *ha) 2425 { 2426 struct ifnet *ifp = ha->ifp; 2427 2428 QL_DPRINT1(ha, "Driver Initialization start \n"); 2429 2430 qlnx_stop(ha); 2431 2432 if (qlnx_load(ha) == 0) { 2433 ifp->if_drv_flags |= IFF_DRV_RUNNING; 2434 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 2435 2436 #ifdef QLNX_ENABLE_IWARP 2437 if (qlnx_vf_device(ha) != 0) { 2438 
qlnx_rdma_dev_open(ha); 2439 } 2440 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2441 } 2442 2443 return; 2444 } 2445 2446 static void 2447 qlnx_init(void *arg) 2448 { 2449 qlnx_host_t *ha; 2450 2451 ha = (qlnx_host_t *)arg; 2452 2453 QL_DPRINT2(ha, "enter\n"); 2454 2455 QLNX_LOCK(ha); 2456 qlnx_init_locked(ha); 2457 QLNX_UNLOCK(ha); 2458 2459 QL_DPRINT2(ha, "exit\n"); 2460 2461 return; 2462 } 2463 2464 static int 2465 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2466 { 2467 struct ecore_filter_mcast *mcast; 2468 struct ecore_dev *cdev; 2469 int rc; 2470 2471 cdev = &ha->cdev; 2472 2473 mcast = &ha->ecore_mcast; 2474 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2475 2476 if (add_mac) 2477 mcast->opcode = ECORE_FILTER_ADD; 2478 else 2479 mcast->opcode = ECORE_FILTER_REMOVE; 2480 2481 mcast->num_mc_addrs = 1; 2482 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2483 2484 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2485 2486 return (rc); 2487 } 2488 2489 static int 2490 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2491 { 2492 int i; 2493 2494 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2495 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2496 return 0; /* its been already added */ 2497 } 2498 2499 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2500 if ((ha->mcast[i].addr[0] == 0) && 2501 (ha->mcast[i].addr[1] == 0) && 2502 (ha->mcast[i].addr[2] == 0) && 2503 (ha->mcast[i].addr[3] == 0) && 2504 (ha->mcast[i].addr[4] == 0) && 2505 (ha->mcast[i].addr[5] == 0)) { 2506 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2507 return (-1); 2508 2509 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2510 ha->nmcast++; 2511 2512 return 0; 2513 } 2514 } 2515 return 0; 2516 } 2517 2518 static int 2519 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2520 { 2521 int i; 2522 2523 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2524 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2525 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2526 return (-1); 2527 2528 ha->mcast[i].addr[0] = 0; 2529 ha->mcast[i].addr[1] = 0; 2530 ha->mcast[i].addr[2] = 0; 2531 ha->mcast[i].addr[3] = 0; 2532 ha->mcast[i].addr[4] = 0; 2533 ha->mcast[i].addr[5] = 0; 2534 2535 ha->nmcast--; 2536 2537 return 0; 2538 } 2539 } 2540 return 0; 2541 } 2542 2543 /* 2544 * Name: qls_hw_set_multi 2545 * Function: Sets the Multicast Addresses provided the host O.S into the 2546 * hardware (for the given interface) 2547 */ 2548 static void 2549 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2550 uint32_t add_mac) 2551 { 2552 int i; 2553 2554 for (i = 0; i < mcnt; i++) { 2555 if (add_mac) { 2556 if (qlnx_hw_add_mcast(ha, mta)) 2557 break; 2558 } else { 2559 if (qlnx_hw_del_mcast(ha, mta)) 2560 break; 2561 } 2562 2563 mta += ETHER_HDR_LEN; 2564 } 2565 return; 2566 } 2567 2568 static u_int 2569 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 2570 { 2571 uint8_t *mta = arg; 2572 2573 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2574 return (0); 2575 2576 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2577 2578 return (1); 2579 } 2580 2581 static int 2582 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2583 { 2584 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; 2585 struct ifnet *ifp = ha->ifp; 2586 u_int mcnt; 2587 2588 if (qlnx_vf_device(ha) == 0) 2589 return (0); 2590 2591 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); 2592 2593 QLNX_LOCK(ha); 2594 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2595 QLNX_UNLOCK(ha); 2596 2597 return (0); 
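	/*
	 * Annotation added for clarity (not part of the original source):
	 * if_foreach_llmaddr() walks the interface's link-layer multicast
	 * addresses, invokes the callback once per address with the running
	 * count, and returns the sum of the callback return values.
	 * qlnx_copy_maddr() above uses that to pack at most
	 * QLNX_MAX_NUM_MULTICAST_ADDRS entries into the flat "mta" buffer,
	 * ETHER_HDR_LEN bytes apart, matching the stride qlnx_hw_set_multi()
	 * walks.  A stand-alone usage sketch with hypothetical names:
	 *
	 *	static u_int
	 *	count_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
	 *	{
	 *		(void)arg; (void)sdl; (void)cnt;
	 *		return (1);	// count this address
	 *	}
	 *
	 *	u_int n_maddrs = if_foreach_llmaddr(ifp, count_maddr, NULL);
	 */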
2598 } 2599 2600 static int 2601 qlnx_set_promisc(qlnx_host_t *ha) 2602 { 2603 int rc = 0; 2604 uint8_t filter; 2605 2606 if (qlnx_vf_device(ha) == 0) 2607 return (0); 2608 2609 filter = ha->filter; 2610 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2611 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2612 2613 rc = qlnx_set_rx_accept_filter(ha, filter); 2614 return (rc); 2615 } 2616 2617 static int 2618 qlnx_set_allmulti(qlnx_host_t *ha) 2619 { 2620 int rc = 0; 2621 uint8_t filter; 2622 2623 if (qlnx_vf_device(ha) == 0) 2624 return (0); 2625 2626 filter = ha->filter; 2627 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2628 rc = qlnx_set_rx_accept_filter(ha, filter); 2629 2630 return (rc); 2631 } 2632 2633 static int 2634 qlnx_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 2635 { 2636 int ret = 0, mask; 2637 struct ifreq *ifr = (struct ifreq *)data; 2638 struct ifaddr *ifa = (struct ifaddr *)data; 2639 qlnx_host_t *ha; 2640 2641 ha = (qlnx_host_t *)ifp->if_softc; 2642 2643 switch (cmd) { 2644 case SIOCSIFADDR: 2645 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2646 2647 if (ifa->ifa_addr->sa_family == AF_INET) { 2648 ifp->if_flags |= IFF_UP; 2649 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { 2650 QLNX_LOCK(ha); 2651 qlnx_init_locked(ha); 2652 QLNX_UNLOCK(ha); 2653 } 2654 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2655 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2656 2657 arp_ifinit(ifp, ifa); 2658 } else { 2659 ether_ioctl(ifp, cmd, data); 2660 } 2661 break; 2662 2663 case SIOCSIFMTU: 2664 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2665 2666 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2667 ret = EINVAL; 2668 } else { 2669 QLNX_LOCK(ha); 2670 ifp->if_mtu = ifr->ifr_mtu; 2671 ha->max_frame_size = 2672 ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; 2673 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2674 qlnx_init_locked(ha); 2675 } 2676 2677 QLNX_UNLOCK(ha); 2678 } 2679 2680 break; 2681 2682 case SIOCSIFFLAGS: 2683 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2684 2685 QLNX_LOCK(ha); 2686 2687 if (ifp->if_flags & IFF_UP) { 2688 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2689 if ((ifp->if_flags ^ ha->if_flags) & 2690 IFF_PROMISC) { 2691 ret = qlnx_set_promisc(ha); 2692 } else if ((ifp->if_flags ^ ha->if_flags) & 2693 IFF_ALLMULTI) { 2694 ret = qlnx_set_allmulti(ha); 2695 } 2696 } else { 2697 ha->max_frame_size = ifp->if_mtu + 2698 ETHER_HDR_LEN + ETHER_CRC_LEN; 2699 qlnx_init_locked(ha); 2700 } 2701 } else { 2702 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2703 qlnx_stop(ha); 2704 ha->if_flags = ifp->if_flags; 2705 } 2706 2707 QLNX_UNLOCK(ha); 2708 break; 2709 2710 case SIOCADDMULTI: 2711 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2712 2713 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2714 if (qlnx_set_multi(ha, 1)) 2715 ret = EINVAL; 2716 } 2717 break; 2718 2719 case SIOCDELMULTI: 2720 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2721 2722 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 2723 if (qlnx_set_multi(ha, 0)) 2724 ret = EINVAL; 2725 } 2726 break; 2727 2728 case SIOCSIFMEDIA: 2729 case SIOCGIFMEDIA: 2730 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2731 2732 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2733 break; 2734 2735 case SIOCSIFCAP: 2736 2737 mask = ifr->ifr_reqcap ^ ifp->if_capenable; 2738 2739 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2740 2741 if (mask & IFCAP_HWCSUM) 2742 ifp->if_capenable ^= IFCAP_HWCSUM; 2743 if (mask & IFCAP_TSO4) 2744 ifp->if_capenable ^= IFCAP_TSO4; 2745 if (mask & IFCAP_TSO6) 2746 ifp->if_capenable ^= IFCAP_TSO6; 2747 if (mask & 
IFCAP_VLAN_HWTAGGING) 2748 ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2749 if (mask & IFCAP_VLAN_HWTSO) 2750 ifp->if_capenable ^= IFCAP_VLAN_HWTSO; 2751 if (mask & IFCAP_LRO) 2752 ifp->if_capenable ^= IFCAP_LRO; 2753 2754 QLNX_LOCK(ha); 2755 2756 if (ifp->if_drv_flags & IFF_DRV_RUNNING) 2757 qlnx_init_locked(ha); 2758 2759 QLNX_UNLOCK(ha); 2760 2761 VLAN_CAPABILITIES(ifp); 2762 break; 2763 2764 case SIOCGI2C: 2765 { 2766 struct ifi2creq i2c; 2767 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2768 struct ecore_ptt *p_ptt; 2769 2770 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2771 2772 if (ret) 2773 break; 2774 2775 if ((i2c.len > sizeof (i2c.data)) || 2776 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2777 ret = EINVAL; 2778 break; 2779 } 2780 2781 p_ptt = ecore_ptt_acquire(p_hwfn); 2782 2783 if (!p_ptt) { 2784 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2785 ret = -1; 2786 break; 2787 } 2788 2789 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2790 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2791 i2c.len, &i2c.data[0]); 2792 2793 ecore_ptt_release(p_hwfn, p_ptt); 2794 2795 if (ret) { 2796 ret = -1; 2797 break; 2798 } 2799 2800 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2801 2802 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2803 len = %d addr = 0x%02x offset = 0x%04x \ 2804 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2805 0x%02x 0x%02x 0x%02x\n", 2806 ret, i2c.len, i2c.dev_addr, i2c.offset, 2807 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2808 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2809 break; 2810 } 2811 2812 default: 2813 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2814 ret = ether_ioctl(ifp, cmd, data); 2815 break; 2816 } 2817 2818 return (ret); 2819 } 2820 2821 static int 2822 qlnx_media_change(struct ifnet *ifp) 2823 { 2824 qlnx_host_t *ha; 2825 struct ifmedia *ifm; 2826 int ret = 0; 2827 2828 ha = (qlnx_host_t *)ifp->if_softc; 2829 2830 QL_DPRINT2(ha, "enter\n"); 2831 2832 ifm = &ha->media; 2833 2834 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2835 ret = EINVAL; 2836 2837 QL_DPRINT2(ha, "exit\n"); 2838 2839 return (ret); 2840 } 2841 2842 static void 2843 qlnx_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) 2844 { 2845 qlnx_host_t *ha; 2846 2847 ha = (qlnx_host_t *)ifp->if_softc; 2848 2849 QL_DPRINT2(ha, "enter\n"); 2850 2851 ifmr->ifm_status = IFM_AVALID; 2852 ifmr->ifm_active = IFM_ETHER; 2853 2854 if (ha->link_up) { 2855 ifmr->ifm_status |= IFM_ACTIVE; 2856 ifmr->ifm_active |= 2857 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2858 2859 if (ha->if_link.link_partner_caps & 2860 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2861 ifmr->ifm_active |= 2862 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2863 } 2864 2865 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2866 2867 return; 2868 } 2869 2870 static void 2871 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2872 struct qlnx_tx_queue *txq) 2873 { 2874 u16 idx; 2875 struct mbuf *mp; 2876 bus_dmamap_t map; 2877 int i; 2878 // struct eth_tx_bd *tx_data_bd; 2879 struct eth_tx_1st_bd *first_bd; 2880 int nbds = 0; 2881 2882 idx = txq->sw_tx_cons; 2883 mp = txq->sw_tx_ring[idx].mp; 2884 map = txq->sw_tx_ring[idx].map; 2885 2886 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2887 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2888 2889 QL_DPRINT1(ha, "(mp == NULL) " 2890 " tx_idx = 0x%x" 2891 " ecore_prod_idx = 0x%x" 2892 " ecore_cons_idx = 0x%x" 2893 " hw_bd_cons = 0x%x" 2894 " txq_db_last = 0x%x" 2895 " elem_left = 0x%x\n", 2896 fp->rss_id, 2897 ecore_chain_get_prod_idx(&txq->tx_pbl), 2898 ecore_chain_get_cons_idx(&txq->tx_pbl), 2899 le16toh(*txq->hw_cons_ptr), 2900 txq->tx_db.raw, 2901 ecore_chain_get_elem_left(&txq->tx_pbl)); 2902 2903 fp->err_tx_free_pkt_null++; 2904 2905 //DEBUG 2906 qlnx_trigger_dump(ha); 2907 2908 return; 2909 } else { 2910 QLNX_INC_OPACKETS((ha->ifp)); 2911 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2912 2913 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2914 bus_dmamap_unload(ha->tx_tag, map); 2915 2916 fp->tx_pkts_freed++; 2917 fp->tx_pkts_completed++; 2918 2919 m_freem(mp); 2920 } 2921 2922 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2923 nbds = first_bd->data.nbds; 2924 2925 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2926 2927 for (i = 1; i < nbds; i++) { 2928 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl); 2929 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2930 } 2931 txq->sw_tx_ring[idx].flags = 0; 2932 txq->sw_tx_ring[idx].mp = NULL; 2933 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2934 2935 return; 2936 } 2937 2938 static void 2939 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2940 struct qlnx_tx_queue *txq) 2941 { 2942 u16 hw_bd_cons; 2943 u16 ecore_cons_idx; 2944 uint16_t diff; 2945 uint16_t idx, idx2; 2946 2947 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2948 2949 while (hw_bd_cons != 2950 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2951 if (hw_bd_cons < ecore_cons_idx) { 2952 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2953 } else { 2954 diff = hw_bd_cons - ecore_cons_idx; 2955 } 2956 if ((diff > TX_RING_SIZE) || 2957 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2958 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2959 2960 QL_DPRINT1(ha, "(diff = 0x%x) " 2961 " tx_idx = 0x%x" 2962 " ecore_prod_idx = 0x%x" 2963 " ecore_cons_idx = 0x%x" 2964 " hw_bd_cons = 0x%x" 2965 " txq_db_last = 0x%x" 2966 " elem_left = 0x%x\n", 2967 diff, 2968 fp->rss_id, 2969 ecore_chain_get_prod_idx(&txq->tx_pbl), 2970 ecore_chain_get_cons_idx(&txq->tx_pbl), 2971 le16toh(*txq->hw_cons_ptr), 2972 txq->tx_db.raw, 2973 ecore_chain_get_elem_left(&txq->tx_pbl)); 2974 2975 fp->err_tx_cons_idx_conflict++; 2976 2977 //DEBUG 2978 qlnx_trigger_dump(ha); 2979 } 2980 2981 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2982 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 2983 prefetch(txq->sw_tx_ring[idx].mp); 2984 prefetch(txq->sw_tx_ring[idx2].mp); 2985 2986 qlnx_free_tx_pkt(ha, fp, txq); 2987 2988 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2989 } 2990 return; 2991 } 2992 2993 static int 2994 qlnx_transmit_locked(struct ifnet *ifp,struct qlnx_fastpath *fp, struct mbuf *mp) 2995 { 2996 int ret = 0; 2997 struct qlnx_tx_queue *txq; 
2998 qlnx_host_t * ha; 2999 uint16_t elem_left; 3000 3001 txq = fp->txq[0]; 3002 ha = (qlnx_host_t *)fp->edev; 3003 3004 if ((!(ifp->if_drv_flags & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3005 if(mp != NULL) 3006 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3007 return (ret); 3008 } 3009 3010 if(mp != NULL) 3011 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3012 3013 mp = drbr_peek(ifp, fp->tx_br); 3014 3015 while (mp != NULL) { 3016 if (qlnx_send(ha, fp, &mp)) { 3017 if (mp != NULL) { 3018 drbr_putback(ifp, fp->tx_br, mp); 3019 } else { 3020 fp->tx_pkts_processed++; 3021 drbr_advance(ifp, fp->tx_br); 3022 } 3023 goto qlnx_transmit_locked_exit; 3024 3025 } else { 3026 drbr_advance(ifp, fp->tx_br); 3027 fp->tx_pkts_transmitted++; 3028 fp->tx_pkts_processed++; 3029 } 3030 3031 mp = drbr_peek(ifp, fp->tx_br); 3032 } 3033 3034 qlnx_transmit_locked_exit: 3035 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3036 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3037 < QLNX_TX_ELEM_MAX_THRESH)) 3038 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3039 3040 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3041 return ret; 3042 } 3043 3044 static int 3045 qlnx_transmit(struct ifnet *ifp, struct mbuf *mp) 3046 { 3047 qlnx_host_t *ha = (qlnx_host_t *)ifp->if_softc; 3048 struct qlnx_fastpath *fp; 3049 int rss_id = 0, ret = 0; 3050 3051 #ifdef QLNX_TRACEPERF_DATA 3052 uint64_t tx_pkts = 0, tx_compl = 0; 3053 #endif 3054 3055 QL_DPRINT2(ha, "enter\n"); 3056 3057 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3058 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3059 ha->num_rss; 3060 3061 fp = &ha->fp_array[rss_id]; 3062 3063 if (fp->tx_br == NULL) { 3064 ret = EINVAL; 3065 goto qlnx_transmit_exit; 3066 } 3067 3068 if (mtx_trylock(&fp->tx_mtx)) { 3069 #ifdef QLNX_TRACEPERF_DATA 3070 tx_pkts = fp->tx_pkts_transmitted; 3071 tx_compl = fp->tx_pkts_completed; 3072 #endif 3073 3074 ret = qlnx_transmit_locked(ifp, fp, mp); 3075 3076 #ifdef QLNX_TRACEPERF_DATA 3077 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3078 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3079 #endif 3080 mtx_unlock(&fp->tx_mtx); 3081 } else { 3082 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3083 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3084 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3085 } 3086 } 3087 3088 qlnx_transmit_exit: 3089 3090 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3091 return ret; 3092 } 3093 3094 static void 3095 qlnx_qflush(struct ifnet *ifp) 3096 { 3097 int rss_id; 3098 struct qlnx_fastpath *fp; 3099 struct mbuf *mp; 3100 qlnx_host_t *ha; 3101 3102 ha = (qlnx_host_t *)ifp->if_softc; 3103 3104 QL_DPRINT2(ha, "enter\n"); 3105 3106 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3107 fp = &ha->fp_array[rss_id]; 3108 3109 if (fp == NULL) 3110 continue; 3111 3112 if (fp->tx_br) { 3113 mtx_lock(&fp->tx_mtx); 3114 3115 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3116 fp->tx_pkts_freed++; 3117 m_freem(mp); 3118 } 3119 mtx_unlock(&fp->tx_mtx); 3120 } 3121 } 3122 QL_DPRINT2(ha, "exit\n"); 3123 3124 return; 3125 } 3126 3127 static void 3128 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3129 { 3130 uint32_t offset; 3131 3132 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3133 3134 bus_write_4(ha->pci_dbells, offset, value); 3135 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3136 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3137 3138 return; 3139 } 3140 3141 static uint32_t 3142 
qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3143 { 3144 struct ether_vlan_header *eh = NULL; 3145 struct ip *ip = NULL; 3146 struct ip6_hdr *ip6 = NULL; 3147 struct tcphdr *th = NULL; 3148 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3149 uint16_t etype = 0; 3150 uint8_t buf[sizeof(struct ip6_hdr)]; 3151 3152 eh = mtod(mp, struct ether_vlan_header *); 3153 3154 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3155 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3156 etype = ntohs(eh->evl_proto); 3157 } else { 3158 ehdrlen = ETHER_HDR_LEN; 3159 etype = ntohs(eh->evl_encap_proto); 3160 } 3161 3162 switch (etype) { 3163 case ETHERTYPE_IP: 3164 ip = (struct ip *)(mp->m_data + ehdrlen); 3165 3166 ip_hlen = sizeof (struct ip); 3167 3168 if (mp->m_len < (ehdrlen + ip_hlen)) { 3169 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3170 ip = (struct ip *)buf; 3171 } 3172 3173 th = (struct tcphdr *)(ip + 1); 3174 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3175 break; 3176 3177 case ETHERTYPE_IPV6: 3178 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3179 3180 ip_hlen = sizeof(struct ip6_hdr); 3181 3182 if (mp->m_len < (ehdrlen + ip_hlen)) { 3183 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3184 buf); 3185 ip6 = (struct ip6_hdr *)buf; 3186 } 3187 th = (struct tcphdr *)(ip6 + 1); 3188 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3189 break; 3190 3191 default: 3192 break; 3193 } 3194 3195 return (offset); 3196 } 3197 3198 static __inline int 3199 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3200 uint32_t offset) 3201 { 3202 int i; 3203 uint32_t sum, nbds_in_hdr = 1; 3204 uint32_t window; 3205 bus_dma_segment_t *s_seg; 3206 3207 /* If the header spans multiple segments, skip those segments */ 3208 3209 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3210 return (0); 3211 3212 i = 0; 3213 3214 while ((i < nsegs) && (offset >= segs->ds_len)) { 3215 offset = offset - segs->ds_len; 3216 segs++; 3217 i++; 3218 nbds_in_hdr++; 3219 } 3220 3221 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3222 3223 nsegs = nsegs - i; 3224 3225 while (nsegs >= window) { 3226 sum = 0; 3227 s_seg = segs; 3228 3229 for (i = 0; i < window; i++){ 3230 sum += s_seg->ds_len; 3231 s_seg++; 3232 } 3233 3234 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3235 fp->tx_lso_wnd_min_len++; 3236 return (-1); 3237 } 3238 3239 nsegs = nsegs - 1; 3240 segs++; 3241 } 3242 3243 return (0); 3244 } 3245 3246 static int 3247 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3248 { 3249 bus_dma_segment_t *segs; 3250 bus_dmamap_t map = 0; 3251 uint32_t nsegs = 0; 3252 int ret = -1; 3253 struct mbuf *m_head = *m_headp; 3254 uint16_t idx = 0; 3255 uint16_t elem_left; 3256 3257 uint8_t nbd = 0; 3258 struct qlnx_tx_queue *txq; 3259 3260 struct eth_tx_1st_bd *first_bd; 3261 struct eth_tx_2nd_bd *second_bd; 3262 struct eth_tx_3rd_bd *third_bd; 3263 struct eth_tx_bd *tx_data_bd; 3264 3265 int seg_idx = 0; 3266 uint32_t nbds_in_hdr = 0; 3267 uint32_t offset = 0; 3268 3269 #ifdef QLNX_TRACE_PERF_DATA 3270 uint16_t bd_used; 3271 #endif 3272 3273 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3274 3275 if (!ha->link_up) 3276 return (-1); 3277 3278 first_bd = NULL; 3279 second_bd = NULL; 3280 third_bd = NULL; 3281 tx_data_bd = NULL; 3282 3283 txq = fp->txq[0]; 3284 3285 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3286 QLNX_TX_ELEM_MIN_THRESH) { 3287 fp->tx_nsegs_gt_elem_left++; 3288 fp->err_tx_nsegs_gt_elem_left++; 3289 3290 return (ENOBUFS); 3291 } 3292 3293 idx = txq->sw_tx_prod; 
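	/*
	 * Annotation added for clarity (not part of the original source):
	 * sw_tx_prod is a software producer index into sw_tx_ring[].  It is
	 * advanced near the end of this function with a power-of-two mask,
	 * so TX_RING_SIZE is assumed to be a power of two:
	 *
	 *	txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1);
	 *
	 * Each slot carries a preallocated bus_dmamap_t that is loaded here
	 * via bus_dmamap_load_mbuf_sg() and unloaded in qlnx_free_tx_pkt()
	 * once qlnx_tx_int() sees the completion for this BD chain.
	 */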
3294 3295 map = txq->sw_tx_ring[idx].map; 3296 segs = txq->segs; 3297 3298 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3299 BUS_DMA_NOWAIT); 3300 3301 if (ha->dbg_trace_tso_pkt_len) { 3302 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3303 if (!fp->tx_tso_min_pkt_len) { 3304 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3305 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3306 } else { 3307 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3308 fp->tx_tso_min_pkt_len = 3309 m_head->m_pkthdr.len; 3310 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3311 fp->tx_tso_max_pkt_len = 3312 m_head->m_pkthdr.len; 3313 } 3314 } 3315 } 3316 3317 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3318 offset = qlnx_tcp_offset(ha, m_head); 3319 3320 if ((ret == EFBIG) || 3321 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3322 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3323 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3324 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3325 struct mbuf *m; 3326 3327 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3328 3329 fp->tx_defrag++; 3330 3331 m = m_defrag(m_head, M_NOWAIT); 3332 if (m == NULL) { 3333 fp->err_tx_defrag++; 3334 fp->tx_pkts_freed++; 3335 m_freem(m_head); 3336 *m_headp = NULL; 3337 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3338 return (ENOBUFS); 3339 } 3340 3341 m_head = m; 3342 *m_headp = m_head; 3343 3344 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3345 segs, &nsegs, BUS_DMA_NOWAIT))) { 3346 fp->err_tx_defrag_dmamap_load++; 3347 3348 QL_DPRINT1(ha, 3349 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3350 ret, m_head->m_pkthdr.len); 3351 3352 fp->tx_pkts_freed++; 3353 m_freem(m_head); 3354 *m_headp = NULL; 3355 3356 return (ret); 3357 } 3358 3359 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3360 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3361 fp->err_tx_non_tso_max_seg++; 3362 3363 QL_DPRINT1(ha, 3364 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3365 ret, nsegs, m_head->m_pkthdr.len); 3366 3367 fp->tx_pkts_freed++; 3368 m_freem(m_head); 3369 *m_headp = NULL; 3370 3371 return (ret); 3372 } 3373 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3374 offset = qlnx_tcp_offset(ha, m_head); 3375 3376 } else if (ret) { 3377 fp->err_tx_dmamap_load++; 3378 3379 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3380 ret, m_head->m_pkthdr.len); 3381 fp->tx_pkts_freed++; 3382 m_freem(m_head); 3383 *m_headp = NULL; 3384 return (ret); 3385 } 3386 3387 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3388 3389 if (ha->dbg_trace_tso_pkt_len) { 3390 if (nsegs < QLNX_FP_MAX_SEGS) 3391 fp->tx_pkts[(nsegs - 1)]++; 3392 else 3393 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3394 } 3395 3396 #ifdef QLNX_TRACE_PERF_DATA 3397 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3398 if(m_head->m_pkthdr.len <= 2048) 3399 fp->tx_pkts_hist[0]++; 3400 else if((m_head->m_pkthdr.len > 2048) && 3401 (m_head->m_pkthdr.len <= 4096)) 3402 fp->tx_pkts_hist[1]++; 3403 else if((m_head->m_pkthdr.len > 4096) && 3404 (m_head->m_pkthdr.len <= 8192)) 3405 fp->tx_pkts_hist[2]++; 3406 else if((m_head->m_pkthdr.len > 8192) && 3407 (m_head->m_pkthdr.len <= 12288 )) 3408 fp->tx_pkts_hist[3]++; 3409 else if((m_head->m_pkthdr.len > 11288) && 3410 (m_head->m_pkthdr.len <= 16394)) 3411 fp->tx_pkts_hist[4]++; 3412 else if((m_head->m_pkthdr.len > 16384) && 3413 (m_head->m_pkthdr.len <= 20480)) 3414 fp->tx_pkts_hist[5]++; 3415 else if((m_head->m_pkthdr.len > 20480) && 3416 (m_head->m_pkthdr.len <= 24576)) 3417 fp->tx_pkts_hist[6]++; 3418 else 
if((m_head->m_pkthdr.len > 24576) && 3419 (m_head->m_pkthdr.len <= 28672)) 3420 fp->tx_pkts_hist[7]++; 3421 else if((m_head->m_pkthdr.len > 28762) && 3422 (m_head->m_pkthdr.len <= 32768)) 3423 fp->tx_pkts_hist[8]++; 3424 else if((m_head->m_pkthdr.len > 32768) && 3425 (m_head->m_pkthdr.len <= 36864)) 3426 fp->tx_pkts_hist[9]++; 3427 else if((m_head->m_pkthdr.len > 36864) && 3428 (m_head->m_pkthdr.len <= 40960)) 3429 fp->tx_pkts_hist[10]++; 3430 else if((m_head->m_pkthdr.len > 40960) && 3431 (m_head->m_pkthdr.len <= 45056)) 3432 fp->tx_pkts_hist[11]++; 3433 else if((m_head->m_pkthdr.len > 45056) && 3434 (m_head->m_pkthdr.len <= 49152)) 3435 fp->tx_pkts_hist[12]++; 3436 else if((m_head->m_pkthdr.len > 49512) && 3437 m_head->m_pkthdr.len <= 53248)) 3438 fp->tx_pkts_hist[13]++; 3439 else if((m_head->m_pkthdr.len > 53248) && 3440 (m_head->m_pkthdr.len <= 57344)) 3441 fp->tx_pkts_hist[14]++; 3442 else if((m_head->m_pkthdr.len > 53248) && 3443 (m_head->m_pkthdr.len <= 57344)) 3444 fp->tx_pkts_hist[15]++; 3445 else if((m_head->m_pkthdr.len > 57344) && 3446 (m_head->m_pkthdr.len <= 61440)) 3447 fp->tx_pkts_hist[16]++; 3448 else 3449 fp->tx_pkts_hist[17]++; 3450 } 3451 3452 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3453 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3454 bd_used = TX_RING_SIZE - elem_left; 3455 3456 if(bd_used <= 100) 3457 fp->tx_pkts_q[0]++; 3458 else if((bd_used > 100) && (bd_used <= 500)) 3459 fp->tx_pkts_q[1]++; 3460 else if((bd_used > 500) && (bd_used <= 1000)) 3461 fp->tx_pkts_q[2]++; 3462 else if((bd_used > 1000) && (bd_used <= 2000)) 3463 fp->tx_pkts_q[3]++; 3464 else if((bd_used > 3000) && (bd_used <= 4000)) 3465 fp->tx_pkts_q[4]++; 3466 else if((bd_used > 4000) && (bd_used <= 5000)) 3467 fp->tx_pkts_q[5]++; 3468 else if((bd_used > 6000) && (bd_used <= 7000)) 3469 fp->tx_pkts_q[6]++; 3470 else if((bd_used > 7000) && (bd_used <= 8000)) 3471 fp->tx_pkts_q[7]++; 3472 else if((bd_used > 8000) && (bd_used <= 9000)) 3473 fp->tx_pkts_q[8]++; 3474 else if((bd_used > 9000) && (bd_used <= 10000)) 3475 fp->tx_pkts_q[9]++; 3476 else if((bd_used > 10000) && (bd_used <= 11000)) 3477 fp->tx_pkts_q[10]++; 3478 else if((bd_used > 11000) && (bd_used <= 12000)) 3479 fp->tx_pkts_q[11]++; 3480 else if((bd_used > 12000) && (bd_used <= 13000)) 3481 fp->tx_pkts_q[12]++; 3482 else if((bd_used > 13000) && (bd_used <= 14000)) 3483 fp->tx_pkts_q[13]++; 3484 else if((bd_used > 14000) && (bd_used <= 15000)) 3485 fp->tx_pkts_q[14]++; 3486 else if((bd_used > 15000) && (bd_used <= 16000)) 3487 fp->tx_pkts_q[15]++; 3488 else 3489 fp->tx_pkts_q[16]++; 3490 } 3491 3492 #endif /* end of QLNX_TRACE_PERF_DATA */ 3493 3494 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3495 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3496 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs" 3497 " in chain[%d] trying to free packets\n", 3498 nsegs, elem_left, fp->rss_id); 3499 3500 fp->tx_nsegs_gt_elem_left++; 3501 3502 (void)qlnx_tx_int(ha, fp, txq); 3503 3504 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3505 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3506 QL_DPRINT1(ha, 3507 "(%d, 0x%x) insuffient BDs in chain[%d]\n", 3508 nsegs, elem_left, fp->rss_id); 3509 3510 fp->err_tx_nsegs_gt_elem_left++; 3511 fp->tx_ring_full = 1; 3512 if (ha->storm_stats_enable) 3513 ha->storm_stats_gather = 1; 3514 return (ENOBUFS); 3515 } 3516 } 3517 3518 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3519 3520 txq->sw_tx_ring[idx].mp = m_head; 3521 3522 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 
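	/*
	 * Annotation added for clarity (not part of the original source):
	 * every packet consumes at least a 1st BD.  For TSO the code below
	 * also produces a 2nd and 3rd BD; the 3rd BD carries the MSS
	 * (lso_mss = m_pkthdr.tso_segsz) and the number of BDs that hold the
	 * L2/L3/L4 header (nbds_in_hdr).  Three cases are distinguished by
	 * "offset", the header length computed by qlnx_tcp_offset():
	 *
	 *	offset == segs->ds_len : the header exactly fills the first
	 *				 DMA segment, so the 1st BD maps it
	 *				 whole.
	 *	offset <  segs->ds_len : the 1st BD maps only the header bytes
	 *				 and the 2nd BD maps the rest of that
	 *				 segment.
	 *	offset >  segs->ds_len : the header spans several segments and
	 *				 nbds_in_hdr is incremented while
	 *				 walking them.
	 */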
3523 3524 memset(first_bd, 0, sizeof(*first_bd)); 3525 3526 first_bd->data.bd_flags.bitfields = 3527 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3528 3529 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3530 3531 nbd++; 3532 3533 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3534 first_bd->data.bd_flags.bitfields |= 3535 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3536 } 3537 3538 if (m_head->m_pkthdr.csum_flags & 3539 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3540 first_bd->data.bd_flags.bitfields |= 3541 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3542 } 3543 3544 if (m_head->m_flags & M_VLANTAG) { 3545 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3546 first_bd->data.bd_flags.bitfields |= 3547 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3548 } 3549 3550 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3551 first_bd->data.bd_flags.bitfields |= 3552 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3553 first_bd->data.bd_flags.bitfields |= 3554 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3555 3556 nbds_in_hdr = 1; 3557 3558 if (offset == segs->ds_len) { 3559 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3560 segs++; 3561 seg_idx++; 3562 3563 second_bd = (struct eth_tx_2nd_bd *) 3564 ecore_chain_produce(&txq->tx_pbl); 3565 memset(second_bd, 0, sizeof(*second_bd)); 3566 nbd++; 3567 3568 if (seg_idx < nsegs) { 3569 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3570 (segs->ds_addr), (segs->ds_len)); 3571 segs++; 3572 seg_idx++; 3573 } 3574 3575 third_bd = (struct eth_tx_3rd_bd *) 3576 ecore_chain_produce(&txq->tx_pbl); 3577 memset(third_bd, 0, sizeof(*third_bd)); 3578 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3579 third_bd->data.bitfields |= 3580 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3581 nbd++; 3582 3583 if (seg_idx < nsegs) { 3584 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3585 (segs->ds_addr), (segs->ds_len)); 3586 segs++; 3587 seg_idx++; 3588 } 3589 3590 for (; seg_idx < nsegs; seg_idx++) { 3591 tx_data_bd = (struct eth_tx_bd *) 3592 ecore_chain_produce(&txq->tx_pbl); 3593 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3594 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3595 segs->ds_addr,\ 3596 segs->ds_len); 3597 segs++; 3598 nbd++; 3599 } 3600 3601 } else if (offset < segs->ds_len) { 3602 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3603 3604 second_bd = (struct eth_tx_2nd_bd *) 3605 ecore_chain_produce(&txq->tx_pbl); 3606 memset(second_bd, 0, sizeof(*second_bd)); 3607 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3608 (segs->ds_addr + offset),\ 3609 (segs->ds_len - offset)); 3610 nbd++; 3611 segs++; 3612 3613 third_bd = (struct eth_tx_3rd_bd *) 3614 ecore_chain_produce(&txq->tx_pbl); 3615 memset(third_bd, 0, sizeof(*third_bd)); 3616 3617 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3618 segs->ds_addr,\ 3619 segs->ds_len); 3620 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3621 third_bd->data.bitfields |= 3622 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3623 segs++; 3624 nbd++; 3625 3626 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3627 tx_data_bd = (struct eth_tx_bd *) 3628 ecore_chain_produce(&txq->tx_pbl); 3629 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3630 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3631 segs->ds_addr,\ 3632 segs->ds_len); 3633 segs++; 3634 nbd++; 3635 } 3636 3637 } else { 3638 offset = offset - segs->ds_len; 3639 segs++; 3640 3641 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3642 if (offset) 3643 nbds_in_hdr++; 3644 3645 tx_data_bd = (struct eth_tx_bd *) 3646 ecore_chain_produce(&txq->tx_pbl); 3647 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3648 3649 
if (second_bd == NULL) { 3650 second_bd = (struct eth_tx_2nd_bd *) 3651 tx_data_bd; 3652 } else if (third_bd == NULL) { 3653 third_bd = (struct eth_tx_3rd_bd *) 3654 tx_data_bd; 3655 } 3656 3657 if (offset && (offset < segs->ds_len)) { 3658 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3659 segs->ds_addr, offset); 3660 3661 tx_data_bd = (struct eth_tx_bd *) 3662 ecore_chain_produce(&txq->tx_pbl); 3663 3664 memset(tx_data_bd, 0, 3665 sizeof(*tx_data_bd)); 3666 3667 if (second_bd == NULL) { 3668 second_bd = 3669 (struct eth_tx_2nd_bd *)tx_data_bd; 3670 } else if (third_bd == NULL) { 3671 third_bd = 3672 (struct eth_tx_3rd_bd *)tx_data_bd; 3673 } 3674 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3675 (segs->ds_addr + offset), \ 3676 (segs->ds_len - offset)); 3677 nbd++; 3678 offset = 0; 3679 } else { 3680 if (offset) 3681 offset = offset - segs->ds_len; 3682 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3683 segs->ds_addr, segs->ds_len); 3684 } 3685 segs++; 3686 nbd++; 3687 } 3688 3689 if (third_bd == NULL) { 3690 third_bd = (struct eth_tx_3rd_bd *) 3691 ecore_chain_produce(&txq->tx_pbl); 3692 memset(third_bd, 0, sizeof(*third_bd)); 3693 } 3694 3695 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3696 third_bd->data.bitfields |= 3697 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3698 } 3699 fp->tx_tso_pkts++; 3700 } else { 3701 segs++; 3702 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3703 tx_data_bd = (struct eth_tx_bd *) 3704 ecore_chain_produce(&txq->tx_pbl); 3705 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3706 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3707 segs->ds_len); 3708 segs++; 3709 nbd++; 3710 } 3711 first_bd->data.bitfields = 3712 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3713 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3714 first_bd->data.bitfields = 3715 htole16(first_bd->data.bitfields); 3716 fp->tx_non_tso_pkts++; 3717 } 3718 3719 first_bd->data.nbds = nbd; 3720 3721 if (ha->dbg_trace_tso_pkt_len) { 3722 if (fp->tx_tso_max_nsegs < nsegs) 3723 fp->tx_tso_max_nsegs = nsegs; 3724 3725 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3726 fp->tx_tso_min_nsegs = nsegs; 3727 } 3728 3729 txq->sw_tx_ring[idx].nsegs = nsegs; 3730 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3731 3732 txq->tx_db.data.bd_prod = 3733 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3734 3735 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3736 3737 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3738 return (0); 3739 } 3740 3741 static void 3742 qlnx_stop(qlnx_host_t *ha) 3743 { 3744 struct ifnet *ifp = ha->ifp; 3745 int i; 3746 3747 ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE | IFF_DRV_RUNNING); 3748 3749 /* 3750 * We simply lock and unlock each fp->tx_mtx to 3751 * propagate the if_drv_flags 3752 * state to each tx thread 3753 */ 3754 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3755 3756 if (ha->state == QLNX_STATE_OPEN) { 3757 for (i = 0; i < ha->num_rss; i++) { 3758 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3759 3760 mtx_lock(&fp->tx_mtx); 3761 mtx_unlock(&fp->tx_mtx); 3762 3763 if (fp->fp_taskqueue != NULL) 3764 taskqueue_enqueue(fp->fp_taskqueue, 3765 &fp->fp_task); 3766 } 3767 } 3768 #ifdef QLNX_ENABLE_IWARP 3769 if (qlnx_vf_device(ha) != 0) { 3770 qlnx_rdma_dev_close(ha); 3771 } 3772 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3773 3774 qlnx_unload(ha); 3775 3776 return; 3777 } 3778 3779 static int 3780 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3781 { 3782 return(TX_RING_SIZE - 1); 3783 } 3784 3785 uint8_t * 3786 qlnx_get_mac_addr(qlnx_host_t *ha) 3787 { 3788 struct 
ecore_hwfn *p_hwfn; 3789 unsigned char mac[ETHER_ADDR_LEN]; 3790 uint8_t p_is_forced; 3791 3792 p_hwfn = &ha->cdev.hwfns[0]; 3793 3794 if (qlnx_vf_device(ha) != 0) 3795 return (p_hwfn->hw_info.hw_mac_addr); 3796 3797 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3798 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3799 true) { 3800 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3801 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3802 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3803 memcpy(ha->primary_mac, mac, ETH_ALEN); 3804 } 3805 3806 return (ha->primary_mac); 3807 } 3808 3809 static uint32_t 3810 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3811 { 3812 uint32_t ifm_type = 0; 3813 3814 switch (if_link->media_type) { 3815 case MEDIA_MODULE_FIBER: 3816 case MEDIA_UNSPECIFIED: 3817 if (if_link->speed == (100 * 1000)) 3818 ifm_type = QLNX_IFM_100G_SR4; 3819 else if (if_link->speed == (40 * 1000)) 3820 ifm_type = IFM_40G_SR4; 3821 else if (if_link->speed == (25 * 1000)) 3822 ifm_type = QLNX_IFM_25G_SR; 3823 else if (if_link->speed == (10 * 1000)) 3824 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3825 else if (if_link->speed == (1 * 1000)) 3826 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3827 3828 break; 3829 3830 case MEDIA_DA_TWINAX: 3831 if (if_link->speed == (100 * 1000)) 3832 ifm_type = QLNX_IFM_100G_CR4; 3833 else if (if_link->speed == (40 * 1000)) 3834 ifm_type = IFM_40G_CR4; 3835 else if (if_link->speed == (25 * 1000)) 3836 ifm_type = QLNX_IFM_25G_CR; 3837 else if (if_link->speed == (10 * 1000)) 3838 ifm_type = IFM_10G_TWINAX; 3839 3840 break; 3841 3842 default : 3843 ifm_type = IFM_UNKNOWN; 3844 break; 3845 } 3846 return (ifm_type); 3847 } 3848 3849 /***************************************************************************** 3850 * Interrupt Service Functions 3851 *****************************************************************************/ 3852 3853 static int 3854 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3855 struct mbuf *mp_head, uint16_t len) 3856 { 3857 struct mbuf *mp, *mpf, *mpl; 3858 struct sw_rx_data *sw_rx_data; 3859 struct qlnx_rx_queue *rxq; 3860 uint16_t len_in_buffer; 3861 3862 rxq = fp->rxq; 3863 mpf = mpl = mp = NULL; 3864 3865 while (len) { 3866 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3867 3868 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3869 mp = sw_rx_data->data; 3870 3871 if (mp == NULL) { 3872 QL_DPRINT1(ha, "mp = NULL\n"); 3873 fp->err_rx_mp_null++; 3874 rxq->sw_rx_cons = 3875 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3876 3877 if (mpf != NULL) 3878 m_freem(mpf); 3879 3880 return (-1); 3881 } 3882 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3883 BUS_DMASYNC_POSTREAD); 3884 3885 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3886 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3887 " incoming packet and reusing its buffer\n"); 3888 3889 qlnx_reuse_rx_data(rxq); 3890 fp->err_rx_alloc_errors++; 3891 3892 if (mpf != NULL) 3893 m_freem(mpf); 3894 3895 return (-1); 3896 } 3897 ecore_chain_consume(&rxq->rx_bd_ring); 3898 3899 if (len > rxq->rx_buf_size) 3900 len_in_buffer = rxq->rx_buf_size; 3901 else 3902 len_in_buffer = len; 3903 3904 len = len - len_in_buffer; 3905 3906 mp->m_flags &= ~M_PKTHDR; 3907 mp->m_next = NULL; 3908 mp->m_len = len_in_buffer; 3909 3910 if (mpf == NULL) 3911 mpf = mpl = mp; 3912 else { 3913 mpl->m_next = mp; 3914 mpl = mp; 3915 } 3916 } 3917 3918 if (mpf != NULL) 3919 mp_head->m_next = mpf; 3920 3921 return (0); 3922 } 3923 
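/*
 * Annotation added for clarity (not part of the original source).
 *
 * The qlnx_tpa_*() handlers below implement hardware LRO (TPA, transparent
 * packet aggregation).  Each aggregation slot in rxq->tpa_info[] moves
 * through a small state machine driven by the start/continuation/end
 * completion queue entries:
 *
 *	QLNX_AGG_STATE_NONE  --(tpa_start cqe)--> QLNX_AGG_STATE_START
 *	QLNX_AGG_STATE_START --(tpa_cont cqe)---> QLNX_AGG_STATE_START
 *	QLNX_AGG_STATE_START --(tpa_end cqe)----> aggregation delivered
 *	any state  --(buffer alloc or CQE error)-> QLNX_AGG_STATE_ERROR
 *
 * Once a slot is marked QLNX_AGG_STATE_ERROR, later CQEs for that
 * agg_index only recycle their receive buffers (see the agg_state checks
 * in qlnx_tpa_cont()/qlnx_tpa_end()).  A hypothetical sketch of the
 * dispatch a caller performs on the CQE type; the names are illustrative
 * and the actual dispatch lives in the fast-path receive handler of this
 * driver:
 *
 *	switch (cqe_type) {
 *	case ETH_RX_CQE_TYPE_TPA_START:
 *		qlnx_tpa_start(ha, fp, rxq, &cqe->fast_path_tpa_start);
 *		break;
 *	case ETH_RX_CQE_TYPE_TPA_CONT:
 *		qlnx_tpa_cont(ha, fp, rxq, &cqe->fast_path_tpa_cont);
 *		break;
 *	case ETH_RX_CQE_TYPE_TPA_END:
 *		qlnx_tpa_end(ha, fp, rxq, &cqe->fast_path_tpa_end);
 *		break;
 *	}
 */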
3924 static void 3925 qlnx_tpa_start(qlnx_host_t *ha, 3926 struct qlnx_fastpath *fp, 3927 struct qlnx_rx_queue *rxq, 3928 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3929 { 3930 uint32_t agg_index; 3931 struct ifnet *ifp = ha->ifp; 3932 struct mbuf *mp; 3933 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3934 struct sw_rx_data *sw_rx_data; 3935 dma_addr_t addr; 3936 bus_dmamap_t map; 3937 struct eth_rx_bd *rx_bd; 3938 int i; 3939 uint8_t hash_type; 3940 3941 agg_index = cqe->tpa_agg_index; 3942 3943 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3944 \t type = 0x%x\n \ 3945 \t bitfields = 0x%x\n \ 3946 \t seg_len = 0x%x\n \ 3947 \t pars_flags = 0x%x\n \ 3948 \t vlan_tag = 0x%x\n \ 3949 \t rss_hash = 0x%x\n \ 3950 \t len_on_first_bd = 0x%x\n \ 3951 \t placement_offset = 0x%x\n \ 3952 \t tpa_agg_index = 0x%x\n \ 3953 \t header_len = 0x%x\n \ 3954 \t ext_bd_len_list[0] = 0x%x\n \ 3955 \t ext_bd_len_list[1] = 0x%x\n \ 3956 \t ext_bd_len_list[2] = 0x%x\n \ 3957 \t ext_bd_len_list[3] = 0x%x\n \ 3958 \t ext_bd_len_list[4] = 0x%x\n", 3959 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3960 cqe->pars_flags.flags, cqe->vlan_tag, 3961 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3962 cqe->tpa_agg_index, cqe->header_len, 3963 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3964 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3965 cqe->ext_bd_len_list[4]); 3966 3967 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3968 fp->err_rx_tpa_invalid_agg_num++; 3969 return; 3970 } 3971 3972 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3973 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3974 mp = sw_rx_data->data; 3975 3976 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 3977 3978 if (mp == NULL) { 3979 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 3980 fp->err_rx_mp_null++; 3981 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3982 3983 return; 3984 } 3985 3986 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3987 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 3988 " flags = %x, dropping incoming packet\n", fp->rss_id, 3989 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 3990 3991 fp->err_rx_hw_errors++; 3992 3993 qlnx_reuse_rx_data(rxq); 3994 3995 QLNX_INC_IERRORS(ifp); 3996 3997 return; 3998 } 3999 4000 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4001 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4002 " dropping incoming packet and reusing its buffer\n", 4003 fp->rss_id); 4004 4005 fp->err_rx_alloc_errors++; 4006 QLNX_INC_IQDROPS(ifp); 4007 4008 /* 4009 * Load the tpa mbuf into the rx ring and save the 4010 * posted mbuf 4011 */ 4012 4013 map = sw_rx_data->map; 4014 addr = sw_rx_data->dma_addr; 4015 4016 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4017 4018 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4019 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4020 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4021 4022 rxq->tpa_info[agg_index].rx_buf.data = mp; 4023 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4024 rxq->tpa_info[agg_index].rx_buf.map = map; 4025 4026 rx_bd = (struct eth_rx_bd *) 4027 ecore_chain_produce(&rxq->rx_bd_ring); 4028 4029 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4030 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4031 4032 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4033 BUS_DMASYNC_PREREAD); 4034 4035 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4036 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4037 
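		/*
		 * Clarifying comment (added): the software producer and
		 * consumer indices above wrap with a power-of-two mask, and
		 * the ecore chain produce/consume calls on either side of
		 * them keep the hardware BD ring in step with the software
		 * ring even on this error path.
		 */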
4038 ecore_chain_consume(&rxq->rx_bd_ring); 4039 4040 /* Now reuse any buffers posted in ext_bd_len_list */ 4041 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4042 if (cqe->ext_bd_len_list[i] == 0) 4043 break; 4044 4045 qlnx_reuse_rx_data(rxq); 4046 } 4047 4048 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4049 return; 4050 } 4051 4052 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4053 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4054 " dropping incoming packet and reusing its buffer\n", 4055 fp->rss_id); 4056 4057 QLNX_INC_IQDROPS(ifp); 4058 4059 /* if we already have mbuf head in aggregation free it */ 4060 if (rxq->tpa_info[agg_index].mpf) { 4061 m_freem(rxq->tpa_info[agg_index].mpf); 4062 rxq->tpa_info[agg_index].mpl = NULL; 4063 } 4064 rxq->tpa_info[agg_index].mpf = mp; 4065 rxq->tpa_info[agg_index].mpl = NULL; 4066 4067 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4068 ecore_chain_consume(&rxq->rx_bd_ring); 4069 4070 /* Now reuse any buffers posted in ext_bd_len_list */ 4071 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4072 if (cqe->ext_bd_len_list[i] == 0) 4073 break; 4074 4075 qlnx_reuse_rx_data(rxq); 4076 } 4077 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4078 4079 return; 4080 } 4081 4082 /* 4083 * first process the ext_bd_len_list 4084 * if this fails then we simply drop the packet 4085 */ 4086 ecore_chain_consume(&rxq->rx_bd_ring); 4087 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4088 4089 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4090 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4091 4092 if (cqe->ext_bd_len_list[i] == 0) 4093 break; 4094 4095 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4096 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4097 BUS_DMASYNC_POSTREAD); 4098 4099 mpc = sw_rx_data->data; 4100 4101 if (mpc == NULL) { 4102 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4103 fp->err_rx_mp_null++; 4104 if (mpf != NULL) 4105 m_freem(mpf); 4106 mpf = mpl = NULL; 4107 rxq->tpa_info[agg_index].agg_state = 4108 QLNX_AGG_STATE_ERROR; 4109 ecore_chain_consume(&rxq->rx_bd_ring); 4110 rxq->sw_rx_cons = 4111 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4112 continue; 4113 } 4114 4115 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4116 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4117 " dropping incoming packet and reusing its" 4118 " buffer\n", fp->rss_id); 4119 4120 qlnx_reuse_rx_data(rxq); 4121 4122 if (mpf != NULL) 4123 m_freem(mpf); 4124 mpf = mpl = NULL; 4125 4126 rxq->tpa_info[agg_index].agg_state = 4127 QLNX_AGG_STATE_ERROR; 4128 4129 ecore_chain_consume(&rxq->rx_bd_ring); 4130 rxq->sw_rx_cons = 4131 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4132 4133 continue; 4134 } 4135 4136 mpc->m_flags &= ~M_PKTHDR; 4137 mpc->m_next = NULL; 4138 mpc->m_len = cqe->ext_bd_len_list[i]; 4139 4140 if (mpf == NULL) { 4141 mpf = mpl = mpc; 4142 } else { 4143 mpl->m_len = ha->rx_buf_size; 4144 mpl->m_next = mpc; 4145 mpl = mpc; 4146 } 4147 4148 ecore_chain_consume(&rxq->rx_bd_ring); 4149 rxq->sw_rx_cons = 4150 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4151 } 4152 4153 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4154 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4155 " incoming packet and reusing its buffer\n", 4156 fp->rss_id); 4157 4158 QLNX_INC_IQDROPS(ifp); 4159 4160 rxq->tpa_info[agg_index].mpf = mp; 4161 rxq->tpa_info[agg_index].mpl = NULL; 4162 4163 return; 4164 } 4165 4166 rxq->tpa_info[agg_index].placement_offset = 
cqe->placement_offset; 4167 4168 if (mpf != NULL) { 4169 mp->m_len = ha->rx_buf_size; 4170 mp->m_next = mpf; 4171 rxq->tpa_info[agg_index].mpf = mp; 4172 rxq->tpa_info[agg_index].mpl = mpl; 4173 } else { 4174 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4175 rxq->tpa_info[agg_index].mpf = mp; 4176 rxq->tpa_info[agg_index].mpl = mp; 4177 mp->m_next = NULL; 4178 } 4179 4180 mp->m_flags |= M_PKTHDR; 4181 4182 /* assign packet to this interface interface */ 4183 mp->m_pkthdr.rcvif = ifp; 4184 4185 /* assume no hardware checksum has complated */ 4186 mp->m_pkthdr.csum_flags = 0; 4187 4188 //mp->m_pkthdr.flowid = fp->rss_id; 4189 mp->m_pkthdr.flowid = cqe->rss_hash; 4190 4191 hash_type = cqe->bitfields & 4192 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4193 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4194 4195 switch (hash_type) { 4196 case RSS_HASH_TYPE_IPV4: 4197 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4198 break; 4199 4200 case RSS_HASH_TYPE_TCP_IPV4: 4201 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4202 break; 4203 4204 case RSS_HASH_TYPE_IPV6: 4205 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4206 break; 4207 4208 case RSS_HASH_TYPE_TCP_IPV6: 4209 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4210 break; 4211 4212 default: 4213 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4214 break; 4215 } 4216 4217 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4218 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4219 4220 mp->m_pkthdr.csum_data = 0xFFFF; 4221 4222 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4223 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4224 mp->m_flags |= M_VLANTAG; 4225 } 4226 4227 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4228 4229 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4230 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4231 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4232 4233 return; 4234 } 4235 4236 static void 4237 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4238 struct qlnx_rx_queue *rxq, 4239 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4240 { 4241 struct sw_rx_data *sw_rx_data; 4242 int i; 4243 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4244 struct mbuf *mp; 4245 uint32_t agg_index; 4246 4247 QL_DPRINT7(ha, "[%d]: enter\n \ 4248 \t type = 0x%x\n \ 4249 \t tpa_agg_index = 0x%x\n \ 4250 \t len_list[0] = 0x%x\n \ 4251 \t len_list[1] = 0x%x\n \ 4252 \t len_list[2] = 0x%x\n \ 4253 \t len_list[3] = 0x%x\n \ 4254 \t len_list[4] = 0x%x\n \ 4255 \t len_list[5] = 0x%x\n", 4256 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4257 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4258 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4259 4260 agg_index = cqe->tpa_agg_index; 4261 4262 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4263 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4264 fp->err_rx_tpa_invalid_agg_num++; 4265 return; 4266 } 4267 4268 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4269 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4270 4271 if (cqe->len_list[i] == 0) 4272 break; 4273 4274 if (rxq->tpa_info[agg_index].agg_state != 4275 QLNX_AGG_STATE_START) { 4276 qlnx_reuse_rx_data(rxq); 4277 continue; 4278 } 4279 4280 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4281 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4282 BUS_DMASYNC_POSTREAD); 4283 4284 mpc = sw_rx_data->data; 4285 4286 if (mpc == NULL) { 4287 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4288 4289 fp->err_rx_mp_null++; 4290 if (mpf != NULL) 4291 m_freem(mpf); 4292 mpf = mpl = NULL; 4293 
rxq->tpa_info[agg_index].agg_state = 4294 QLNX_AGG_STATE_ERROR; 4295 ecore_chain_consume(&rxq->rx_bd_ring); 4296 rxq->sw_rx_cons = 4297 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4298 continue; 4299 } 4300 4301 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4302 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4303 " dropping incoming packet and reusing its" 4304 " buffer\n", fp->rss_id); 4305 4306 qlnx_reuse_rx_data(rxq); 4307 4308 if (mpf != NULL) 4309 m_freem(mpf); 4310 mpf = mpl = NULL; 4311 4312 rxq->tpa_info[agg_index].agg_state = 4313 QLNX_AGG_STATE_ERROR; 4314 4315 ecore_chain_consume(&rxq->rx_bd_ring); 4316 rxq->sw_rx_cons = 4317 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4318 4319 continue; 4320 } 4321 4322 mpc->m_flags &= ~M_PKTHDR; 4323 mpc->m_next = NULL; 4324 mpc->m_len = cqe->len_list[i]; 4325 4326 if (mpf == NULL) { 4327 mpf = mpl = mpc; 4328 } else { 4329 mpl->m_len = ha->rx_buf_size; 4330 mpl->m_next = mpc; 4331 mpl = mpc; 4332 } 4333 4334 ecore_chain_consume(&rxq->rx_bd_ring); 4335 rxq->sw_rx_cons = 4336 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4337 } 4338 4339 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4340 fp->rss_id, mpf, mpl); 4341 4342 if (mpf != NULL) { 4343 mp = rxq->tpa_info[agg_index].mpl; 4344 mp->m_len = ha->rx_buf_size; 4345 mp->m_next = mpf; 4346 rxq->tpa_info[agg_index].mpl = mpl; 4347 } 4348 4349 return; 4350 } 4351 4352 static int 4353 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4354 struct qlnx_rx_queue *rxq, 4355 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4356 { 4357 struct sw_rx_data *sw_rx_data; 4358 int i; 4359 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4360 struct mbuf *mp; 4361 uint32_t agg_index; 4362 uint32_t len = 0; 4363 struct ifnet *ifp = ha->ifp; 4364 4365 QL_DPRINT7(ha, "[%d]: enter\n \ 4366 \t type = 0x%x\n \ 4367 \t tpa_agg_index = 0x%x\n \ 4368 \t total_packet_len = 0x%x\n \ 4369 \t num_of_bds = 0x%x\n \ 4370 \t end_reason = 0x%x\n \ 4371 \t num_of_coalesced_segs = 0x%x\n \ 4372 \t ts_delta = 0x%x\n \ 4373 \t len_list[0] = 0x%x\n \ 4374 \t len_list[1] = 0x%x\n \ 4375 \t len_list[2] = 0x%x\n \ 4376 \t len_list[3] = 0x%x\n", 4377 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4378 cqe->total_packet_len, cqe->num_of_bds, 4379 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4380 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4381 cqe->len_list[3]); 4382 4383 agg_index = cqe->tpa_agg_index; 4384 4385 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4386 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4387 4388 fp->err_rx_tpa_invalid_agg_num++; 4389 return (0); 4390 } 4391 4392 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4393 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4394 4395 if (cqe->len_list[i] == 0) 4396 break; 4397 4398 if (rxq->tpa_info[agg_index].agg_state != 4399 QLNX_AGG_STATE_START) { 4400 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4401 4402 qlnx_reuse_rx_data(rxq); 4403 continue; 4404 } 4405 4406 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4407 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4408 BUS_DMASYNC_POSTREAD); 4409 4410 mpc = sw_rx_data->data; 4411 4412 if (mpc == NULL) { 4413 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4414 4415 fp->err_rx_mp_null++; 4416 if (mpf != NULL) 4417 m_freem(mpf); 4418 mpf = mpl = NULL; 4419 rxq->tpa_info[agg_index].agg_state = 4420 QLNX_AGG_STATE_ERROR; 4421 ecore_chain_consume(&rxq->rx_bd_ring); 4422 rxq->sw_rx_cons = 4423 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4424 continue; 4425 } 4426 4427 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4428 
QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4429 " dropping incoming packet and reusing its" 4430 " buffer\n", fp->rss_id); 4431 4432 qlnx_reuse_rx_data(rxq); 4433 4434 if (mpf != NULL) 4435 m_freem(mpf); 4436 mpf = mpl = NULL; 4437 4438 rxq->tpa_info[agg_index].agg_state = 4439 QLNX_AGG_STATE_ERROR; 4440 4441 ecore_chain_consume(&rxq->rx_bd_ring); 4442 rxq->sw_rx_cons = 4443 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4444 4445 continue; 4446 } 4447 4448 mpc->m_flags &= ~M_PKTHDR; 4449 mpc->m_next = NULL; 4450 mpc->m_len = cqe->len_list[i]; 4451 4452 if (mpf == NULL) { 4453 mpf = mpl = mpc; 4454 } else { 4455 mpl->m_len = ha->rx_buf_size; 4456 mpl->m_next = mpc; 4457 mpl = mpc; 4458 } 4459 4460 ecore_chain_consume(&rxq->rx_bd_ring); 4461 rxq->sw_rx_cons = 4462 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4463 } 4464 4465 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4466 4467 if (mpf != NULL) { 4468 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4469 4470 mp = rxq->tpa_info[agg_index].mpl; 4471 mp->m_len = ha->rx_buf_size; 4472 mp->m_next = mpf; 4473 } 4474 4475 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4476 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4477 4478 if (rxq->tpa_info[agg_index].mpf != NULL) 4479 m_freem(rxq->tpa_info[agg_index].mpf); 4480 rxq->tpa_info[agg_index].mpf = NULL; 4481 rxq->tpa_info[agg_index].mpl = NULL; 4482 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4483 return (0); 4484 } 4485 4486 mp = rxq->tpa_info[agg_index].mpf; 4487 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4488 mp->m_pkthdr.len = cqe->total_packet_len; 4489 4490 if (mp->m_next == NULL) 4491 mp->m_len = mp->m_pkthdr.len; 4492 else { 4493 /* compute the total packet length */ 4494 mpf = mp; 4495 while (mpf != NULL) { 4496 len += mpf->m_len; 4497 mpf = mpf->m_next; 4498 } 4499 4500 if (cqe->total_packet_len > len) { 4501 mpl = rxq->tpa_info[agg_index].mpl; 4502 mpl->m_len += (cqe->total_packet_len - len); 4503 } 4504 } 4505 4506 QLNX_INC_IPACKETS(ifp); 4507 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4508 4509 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4510 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4511 fp->rss_id, mp->m_pkthdr.csum_data, 4512 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4513 4514 (*ifp->if_input)(ifp, mp); 4515 4516 rxq->tpa_info[agg_index].mpf = NULL; 4517 rxq->tpa_info[agg_index].mpl = NULL; 4518 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4519 4520 return (cqe->num_of_coalesced_segs); 4521 } 4522 4523 static int 4524 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4525 int lro_enable) 4526 { 4527 uint16_t hw_comp_cons, sw_comp_cons; 4528 int rx_pkt = 0; 4529 struct qlnx_rx_queue *rxq = fp->rxq; 4530 struct ifnet *ifp = ha->ifp; 4531 struct ecore_dev *cdev = &ha->cdev; 4532 struct ecore_hwfn *p_hwfn; 4533 4534 #ifdef QLNX_SOFT_LRO 4535 struct lro_ctrl *lro; 4536 4537 lro = &rxq->lro; 4538 #endif /* #ifdef QLNX_SOFT_LRO */ 4539 4540 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4541 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4542 4543 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4544 4545 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4546 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4547 * read before it is written by FW, then FW writes CQE and SB, and then 4548 * the CPU reads the hw_comp_cons, it will use an old CQE. 
4549 */ 4550 4551 /* Loop to complete all indicated BDs */ 4552 while (sw_comp_cons != hw_comp_cons) { 4553 union eth_rx_cqe *cqe; 4554 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4555 struct sw_rx_data *sw_rx_data; 4556 register struct mbuf *mp; 4557 enum eth_rx_cqe_type cqe_type; 4558 uint16_t len, pad, len_on_first_bd; 4559 uint8_t *data; 4560 uint8_t hash_type; 4561 4562 /* Get the CQE from the completion ring */ 4563 cqe = (union eth_rx_cqe *) 4564 ecore_chain_consume(&rxq->rx_comp_ring); 4565 cqe_type = cqe->fast_path_regular.type; 4566 4567 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4568 QL_DPRINT3(ha, "Got a slowath CQE\n"); 4569 4570 ecore_eth_cqe_completion(p_hwfn, 4571 (struct eth_slow_path_rx_cqe *)cqe); 4572 goto next_cqe; 4573 } 4574 4575 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4576 switch (cqe_type) { 4577 case ETH_RX_CQE_TYPE_TPA_START: 4578 qlnx_tpa_start(ha, fp, rxq, 4579 &cqe->fast_path_tpa_start); 4580 fp->tpa_start++; 4581 break; 4582 4583 case ETH_RX_CQE_TYPE_TPA_CONT: 4584 qlnx_tpa_cont(ha, fp, rxq, 4585 &cqe->fast_path_tpa_cont); 4586 fp->tpa_cont++; 4587 break; 4588 4589 case ETH_RX_CQE_TYPE_TPA_END: 4590 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4591 &cqe->fast_path_tpa_end); 4592 fp->tpa_end++; 4593 break; 4594 4595 default: 4596 break; 4597 } 4598 4599 goto next_cqe; 4600 } 4601 4602 /* Get the data from the SW ring */ 4603 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4604 mp = sw_rx_data->data; 4605 4606 if (mp == NULL) { 4607 QL_DPRINT1(ha, "mp = NULL\n"); 4608 fp->err_rx_mp_null++; 4609 rxq->sw_rx_cons = 4610 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4611 goto next_cqe; 4612 } 4613 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4614 BUS_DMASYNC_POSTREAD); 4615 4616 /* non GRO */ 4617 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4618 len = le16toh(fp_cqe->pkt_len); 4619 pad = fp_cqe->placement_offset; 4620 #if 0 4621 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4622 " len %u, parsing flags = %d pad = %d\n", 4623 cqe_type, fp_cqe->bitfields, 4624 le16toh(fp_cqe->vlan_tag), 4625 len, le16toh(fp_cqe->pars_flags.flags), pad); 4626 #endif 4627 data = mtod(mp, uint8_t *); 4628 data = data + pad; 4629 4630 if (0) 4631 qlnx_dump_buf8(ha, __func__, data, len); 4632 4633 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4634 * is always with a fixed size. If allocation fails, we take the 4635 * consumed BD and return it to the ring in the PROD position. 4636 * The packet that was received on that BD will be dropped (and 4637 * not passed to the upper stack). 
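 * The consumed BD is returned to the PROD position by
 * qlnx_reuse_rx_data(), which copies it into the producer slot and
 * advances both software ring indices (see its definition further below).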
4638 */ 4639 /* If this is an error packet then drop it */ 4640 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4641 CQE_FLAGS_ERR) { 4642 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4643 " dropping incoming packet\n", sw_comp_cons, 4644 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4645 fp->err_rx_hw_errors++; 4646 4647 qlnx_reuse_rx_data(rxq); 4648 4649 QLNX_INC_IERRORS(ifp); 4650 4651 goto next_cqe; 4652 } 4653 4654 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4655 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4656 " incoming packet and reusing its buffer\n"); 4657 qlnx_reuse_rx_data(rxq); 4658 4659 fp->err_rx_alloc_errors++; 4660 4661 QLNX_INC_IQDROPS(ifp); 4662 4663 goto next_cqe; 4664 } 4665 4666 ecore_chain_consume(&rxq->rx_bd_ring); 4667 4668 len_on_first_bd = fp_cqe->len_on_first_bd; 4669 m_adj(mp, pad); 4670 mp->m_pkthdr.len = len; 4671 4672 if ((len > 60 ) && (len > len_on_first_bd)) { 4673 mp->m_len = len_on_first_bd; 4674 4675 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4676 (len - len_on_first_bd)) != 0) { 4677 m_freem(mp); 4678 4679 QLNX_INC_IQDROPS(ifp); 4680 4681 goto next_cqe; 4682 } 4683 4684 } else if (len_on_first_bd < len) { 4685 fp->err_rx_jumbo_chain_pkts++; 4686 } else { 4687 mp->m_len = len; 4688 } 4689 4690 mp->m_flags |= M_PKTHDR; 4691 4692 /* assign packet to this interface interface */ 4693 mp->m_pkthdr.rcvif = ifp; 4694 4695 /* assume no hardware checksum has complated */ 4696 mp->m_pkthdr.csum_flags = 0; 4697 4698 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4699 4700 hash_type = fp_cqe->bitfields & 4701 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4702 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4703 4704 switch (hash_type) { 4705 case RSS_HASH_TYPE_IPV4: 4706 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4707 break; 4708 4709 case RSS_HASH_TYPE_TCP_IPV4: 4710 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4711 break; 4712 4713 case RSS_HASH_TYPE_IPV6: 4714 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4715 break; 4716 4717 case RSS_HASH_TYPE_TCP_IPV6: 4718 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4719 break; 4720 4721 default: 4722 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4723 break; 4724 } 4725 4726 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4727 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4728 } 4729 4730 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4731 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4732 } 4733 4734 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4735 mp->m_pkthdr.csum_data = 0xFFFF; 4736 mp->m_pkthdr.csum_flags |= 4737 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4738 } 4739 4740 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4741 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4742 mp->m_flags |= M_VLANTAG; 4743 } 4744 4745 QLNX_INC_IPACKETS(ifp); 4746 QLNX_INC_IBYTES(ifp, len); 4747 4748 #ifdef QLNX_SOFT_LRO 4749 if (lro_enable) 4750 tcp_lro_queue_mbuf(lro, mp); 4751 else 4752 (*ifp->if_input)(ifp, mp); 4753 #else 4754 4755 (*ifp->if_input)(ifp, mp); 4756 4757 #endif /* #ifdef QLNX_SOFT_LRO */ 4758 4759 rx_pkt++; 4760 4761 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4762 4763 next_cqe: /* don't consume bd rx buffer */ 4764 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4765 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4766 4767 /* CR TPA - revisit how to handle budget in TPA perhaps 4768 increase on "end" */ 4769 if (rx_pkt == budget) 4770 break; 4771 } /* repeat while sw_comp_cons != hw_comp_cons... 
*/ 4772 4773 /* Update producers */ 4774 qlnx_update_rx_prod(p_hwfn, rxq); 4775 4776 return rx_pkt; 4777 } 4778 4779 /* 4780 * fast path interrupt 4781 */ 4782 4783 static void 4784 qlnx_fp_isr(void *arg) 4785 { 4786 qlnx_ivec_t *ivec = arg; 4787 qlnx_host_t *ha; 4788 struct qlnx_fastpath *fp = NULL; 4789 int idx; 4790 4791 ha = ivec->ha; 4792 4793 if (ha->state != QLNX_STATE_OPEN) { 4794 return; 4795 } 4796 4797 idx = ivec->rss_idx; 4798 4799 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4800 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4801 ha->err_illegal_intr++; 4802 return; 4803 } 4804 fp = &ha->fp_array[idx]; 4805 4806 if (fp == NULL) { 4807 ha->err_fp_null++; 4808 } else { 4809 int rx_int = 0; 4810 #ifdef QLNX_SOFT_LRO 4811 int total_rx_count = 0; 4812 #endif 4813 int lro_enable, tc; 4814 struct qlnx_tx_queue *txq; 4815 uint16_t elem_left; 4816 4817 lro_enable = ha->ifp->if_capenable & IFCAP_LRO; 4818 4819 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4820 4821 do { 4822 for (tc = 0; tc < ha->num_tc; tc++) { 4823 txq = fp->txq[tc]; 4824 4825 if((int)(elem_left = 4826 ecore_chain_get_elem_left(&txq->tx_pbl)) < 4827 QLNX_TX_ELEM_THRESH) { 4828 if (mtx_trylock(&fp->tx_mtx)) { 4829 #ifdef QLNX_TRACE_PERF_DATA 4830 tx_compl = fp->tx_pkts_completed; 4831 #endif 4832 4833 qlnx_tx_int(ha, fp, fp->txq[tc]); 4834 #ifdef QLNX_TRACE_PERF_DATA 4835 fp->tx_pkts_compl_intr += 4836 (fp->tx_pkts_completed - tx_compl); 4837 if ((fp->tx_pkts_completed - tx_compl) <= 32) 4838 fp->tx_comInt[0]++; 4839 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 4840 ((fp->tx_pkts_completed - tx_compl) <= 64)) 4841 fp->tx_comInt[1]++; 4842 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 4843 ((fp->tx_pkts_completed - tx_compl) <= 128)) 4844 fp->tx_comInt[2]++; 4845 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 4846 fp->tx_comInt[3]++; 4847 #endif 4848 mtx_unlock(&fp->tx_mtx); 4849 } 4850 } 4851 } 4852 4853 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4854 lro_enable); 4855 4856 if (rx_int) { 4857 fp->rx_pkts += rx_int; 4858 #ifdef QLNX_SOFT_LRO 4859 total_rx_count += rx_int; 4860 #endif 4861 } 4862 4863 } while (rx_int); 4864 4865 #ifdef QLNX_SOFT_LRO 4866 { 4867 struct lro_ctrl *lro; 4868 4869 lro = &fp->rxq->lro; 4870 4871 if (lro_enable && total_rx_count) { 4872 4873 #ifdef QLNX_TRACE_LRO_CNT 4874 if (lro->lro_mbuf_count & ~1023) 4875 fp->lro_cnt_1024++; 4876 else if (lro->lro_mbuf_count & ~511) 4877 fp->lro_cnt_512++; 4878 else if (lro->lro_mbuf_count & ~255) 4879 fp->lro_cnt_256++; 4880 else if (lro->lro_mbuf_count & ~127) 4881 fp->lro_cnt_128++; 4882 else if (lro->lro_mbuf_count & ~63) 4883 fp->lro_cnt_64++; 4884 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4885 4886 tcp_lro_flush_all(lro); 4887 } 4888 } 4889 #endif /* #ifdef QLNX_SOFT_LRO */ 4890 4891 ecore_sb_update_sb_idx(fp->sb_info); 4892 rmb(); 4893 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4894 } 4895 4896 return; 4897 } 4898 4899 /* 4900 * slow path interrupt processing function 4901 * can be invoked in polled mode or in interrupt mode via taskqueue. 
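 * In either case it only counts the event and defers the actual work to
 * ecore_int_sp_dpc() for the hw-function passed in as the argument.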
4902 */ 4903 void 4904 qlnx_sp_isr(void *arg) 4905 { 4906 struct ecore_hwfn *p_hwfn; 4907 qlnx_host_t *ha; 4908 4909 p_hwfn = arg; 4910 4911 ha = (qlnx_host_t *)p_hwfn->p_dev; 4912 4913 ha->sp_interrupts++; 4914 4915 QL_DPRINT2(ha, "enter\n"); 4916 4917 ecore_int_sp_dpc(p_hwfn); 4918 4919 QL_DPRINT2(ha, "exit\n"); 4920 4921 return; 4922 } 4923 4924 /***************************************************************************** 4925 * Support Functions for DMA'able Memory 4926 *****************************************************************************/ 4927 4928 static void 4929 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4930 { 4931 *((bus_addr_t *)arg) = 0; 4932 4933 if (error) { 4934 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4935 return; 4936 } 4937 4938 *((bus_addr_t *)arg) = segs[0].ds_addr; 4939 4940 return; 4941 } 4942 4943 static int 4944 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4945 { 4946 int ret = 0; 4947 bus_addr_t b_addr; 4948 4949 ret = bus_dma_tag_create( 4950 ha->parent_tag,/* parent */ 4951 dma_buf->alignment, 4952 ((bus_size_t)(1ULL << 32)),/* boundary */ 4953 BUS_SPACE_MAXADDR, /* lowaddr */ 4954 BUS_SPACE_MAXADDR, /* highaddr */ 4955 NULL, NULL, /* filter, filterarg */ 4956 dma_buf->size, /* maxsize */ 4957 1, /* nsegments */ 4958 dma_buf->size, /* maxsegsize */ 4959 0, /* flags */ 4960 NULL, NULL, /* lockfunc, lockarg */ 4961 &dma_buf->dma_tag); 4962 4963 if (ret) { 4964 QL_DPRINT1(ha, "could not create dma tag\n"); 4965 goto qlnx_alloc_dmabuf_exit; 4966 } 4967 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4968 (void **)&dma_buf->dma_b, 4969 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4970 &dma_buf->dma_map); 4971 if (ret) { 4972 bus_dma_tag_destroy(dma_buf->dma_tag); 4973 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 4974 goto qlnx_alloc_dmabuf_exit; 4975 } 4976 4977 ret = bus_dmamap_load(dma_buf->dma_tag, 4978 dma_buf->dma_map, 4979 dma_buf->dma_b, 4980 dma_buf->size, 4981 qlnx_dmamap_callback, 4982 &b_addr, BUS_DMA_NOWAIT); 4983 4984 if (ret || !b_addr) { 4985 bus_dma_tag_destroy(dma_buf->dma_tag); 4986 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4987 dma_buf->dma_map); 4988 ret = -1; 4989 goto qlnx_alloc_dmabuf_exit; 4990 } 4991 4992 dma_buf->dma_addr = b_addr; 4993 4994 qlnx_alloc_dmabuf_exit: 4995 4996 return ret; 4997 } 4998 4999 static void 5000 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5001 { 5002 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5003 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5004 bus_dma_tag_destroy(dma_buf->dma_tag); 5005 return; 5006 } 5007 5008 void * 5009 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5010 { 5011 qlnx_dma_t dma_buf; 5012 qlnx_dma_t *dma_p; 5013 qlnx_host_t *ha __unused; 5014 5015 ha = (qlnx_host_t *)ecore_dev; 5016 5017 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5018 5019 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5020 5021 dma_buf.size = size + PAGE_SIZE; 5022 dma_buf.alignment = 8; 5023 5024 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5025 return (NULL); 5026 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5027 5028 *phys = dma_buf.dma_addr; 5029 5030 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 5031 5032 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5033 5034 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5035 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5036 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5037 5038 return 
(dma_buf.dma_b); 5039 } 5040 5041 void 5042 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5043 uint32_t size) 5044 { 5045 qlnx_dma_t dma_buf, *dma_p; 5046 qlnx_host_t *ha; 5047 5048 ha = (qlnx_host_t *)ecore_dev; 5049 5050 if (v_addr == NULL) 5051 return; 5052 5053 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5054 5055 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5056 5057 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5058 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5059 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5060 5061 dma_buf = *dma_p; 5062 5063 if (!ha->qlnxr_debug) 5064 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5065 return; 5066 } 5067 5068 static int 5069 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5070 { 5071 int ret; 5072 device_t dev; 5073 5074 dev = ha->pci_dev; 5075 5076 /* 5077 * Allocate parent DMA Tag 5078 */ 5079 ret = bus_dma_tag_create( 5080 bus_get_dma_tag(dev), /* parent */ 5081 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5082 BUS_SPACE_MAXADDR, /* lowaddr */ 5083 BUS_SPACE_MAXADDR, /* highaddr */ 5084 NULL, NULL, /* filter, filterarg */ 5085 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5086 0, /* nsegments */ 5087 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5088 0, /* flags */ 5089 NULL, NULL, /* lockfunc, lockarg */ 5090 &ha->parent_tag); 5091 5092 if (ret) { 5093 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5094 return (-1); 5095 } 5096 5097 ha->flags.parent_tag = 1; 5098 5099 return (0); 5100 } 5101 5102 static void 5103 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5104 { 5105 if (ha->parent_tag != NULL) { 5106 bus_dma_tag_destroy(ha->parent_tag); 5107 ha->parent_tag = NULL; 5108 } 5109 return; 5110 } 5111 5112 static int 5113 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5114 { 5115 if (bus_dma_tag_create(NULL, /* parent */ 5116 1, 0, /* alignment, bounds */ 5117 BUS_SPACE_MAXADDR, /* lowaddr */ 5118 BUS_SPACE_MAXADDR, /* highaddr */ 5119 NULL, NULL, /* filter, filterarg */ 5120 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5121 QLNX_MAX_SEGMENTS, /* nsegments */ 5122 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5123 0, /* flags */ 5124 NULL, /* lockfunc */ 5125 NULL, /* lockfuncarg */ 5126 &ha->tx_tag)) { 5127 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5128 return (-1); 5129 } 5130 5131 return (0); 5132 } 5133 5134 static void 5135 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5136 { 5137 if (ha->tx_tag != NULL) { 5138 bus_dma_tag_destroy(ha->tx_tag); 5139 ha->tx_tag = NULL; 5140 } 5141 return; 5142 } 5143 5144 static int 5145 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5146 { 5147 if (bus_dma_tag_create(NULL, /* parent */ 5148 1, 0, /* alignment, bounds */ 5149 BUS_SPACE_MAXADDR, /* lowaddr */ 5150 BUS_SPACE_MAXADDR, /* highaddr */ 5151 NULL, NULL, /* filter, filterarg */ 5152 MJUM9BYTES, /* maxsize */ 5153 1, /* nsegments */ 5154 MJUM9BYTES, /* maxsegsize */ 5155 0, /* flags */ 5156 NULL, /* lockfunc */ 5157 NULL, /* lockfuncarg */ 5158 &ha->rx_tag)) { 5159 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5160 5161 return (-1); 5162 } 5163 return (0); 5164 } 5165 5166 static void 5167 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5168 { 5169 if (ha->rx_tag != NULL) { 5170 bus_dma_tag_destroy(ha->rx_tag); 5171 ha->rx_tag = NULL; 5172 } 5173 return; 5174 } 5175 5176 /********************************* 5177 * Exported functions 5178 *********************************/ 5179 uint32_t 5180 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5181 { 5182 uint32_t bar_size; 5183 5184 bar_id = bar_id * 2; 5185 5186 bar_size = 
bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5187 SYS_RES_MEMORY, 5188 PCIR_BAR(bar_id)); 5189 5190 return (bar_size); 5191 } 5192 5193 uint32_t 5194 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5195 { 5196 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5197 pci_reg, 1); 5198 return 0; 5199 } 5200 5201 uint32_t 5202 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5203 uint16_t *reg_value) 5204 { 5205 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5206 pci_reg, 2); 5207 return 0; 5208 } 5209 5210 uint32_t 5211 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5212 uint32_t *reg_value) 5213 { 5214 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5215 pci_reg, 4); 5216 return 0; 5217 } 5218 5219 void 5220 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5221 { 5222 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5223 pci_reg, reg_value, 1); 5224 return; 5225 } 5226 5227 void 5228 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5229 uint16_t reg_value) 5230 { 5231 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5232 pci_reg, reg_value, 2); 5233 return; 5234 } 5235 5236 void 5237 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5238 uint32_t reg_value) 5239 { 5240 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5241 pci_reg, reg_value, 4); 5242 return; 5243 } 5244 5245 int 5246 qlnx_pci_find_capability(void *ecore_dev, int cap) 5247 { 5248 int reg; 5249 qlnx_host_t *ha; 5250 5251 ha = ecore_dev; 5252 5253 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0) 5254 return reg; 5255 else { 5256 QL_DPRINT1(ha, "failed\n"); 5257 return 0; 5258 } 5259 } 5260 5261 int 5262 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5263 { 5264 int reg; 5265 qlnx_host_t *ha; 5266 5267 ha = ecore_dev; 5268 5269 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0) 5270 return reg; 5271 else { 5272 QL_DPRINT1(ha, "failed\n"); 5273 return 0; 5274 } 5275 } 5276 5277 uint32_t 5278 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5279 { 5280 uint32_t data32; 5281 struct ecore_hwfn *p_hwfn; 5282 5283 p_hwfn = hwfn; 5284 5285 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5286 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5287 5288 return (data32); 5289 } 5290 5291 void 5292 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5293 { 5294 struct ecore_hwfn *p_hwfn = hwfn; 5295 5296 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5297 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5298 5299 return; 5300 } 5301 5302 void 5303 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5304 { 5305 struct ecore_hwfn *p_hwfn = hwfn; 5306 5307 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5308 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5309 return; 5310 } 5311 5312 void 5313 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5314 { 5315 struct ecore_dev *cdev; 5316 struct ecore_hwfn *p_hwfn; 5317 uint32_t offset; 5318 5319 p_hwfn = hwfn; 5320 5321 cdev = p_hwfn->p_dev; 5322 5323 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5324 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value); 5325 5326 return; 5327 } 5328 5329 void 5330 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5331 { 5332 struct ecore_hwfn *p_hwfn = hwfn; 5333 5334 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5335
(bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5336 5337 return; 5338 } 5339 5340 uint32_t 5341 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5342 { 5343 uint32_t data32; 5344 bus_size_t offset; 5345 struct ecore_dev *cdev; 5346 5347 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5348 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5349 5350 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5351 5352 return (data32); 5353 } 5354 5355 void 5356 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5357 { 5358 bus_size_t offset; 5359 struct ecore_dev *cdev; 5360 5361 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5362 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5363 5364 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5365 5366 return; 5367 } 5368 5369 void 5370 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5371 { 5372 bus_size_t offset; 5373 struct ecore_dev *cdev; 5374 5375 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5376 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5377 5378 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5379 return; 5380 } 5381 5382 void * 5383 qlnx_zalloc(uint32_t size) 5384 { 5385 caddr_t va; 5386 5387 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5388 bzero(va, size); 5389 return ((void *)va); 5390 } 5391 5392 void 5393 qlnx_barrier(void *p_hwfn) 5394 { 5395 qlnx_host_t *ha; 5396 5397 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5398 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5399 } 5400 5401 void 5402 qlnx_link_update(void *p_hwfn) 5403 { 5404 qlnx_host_t *ha; 5405 int prev_link_state; 5406 5407 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5408 5409 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5410 5411 prev_link_state = ha->link_up; 5412 ha->link_up = ha->if_link.link_up; 5413 5414 if (prev_link_state != ha->link_up) { 5415 if (ha->link_up) { 5416 if_link_state_change(ha->ifp, LINK_STATE_UP); 5417 } else { 5418 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5419 } 5420 } 5421 #ifndef QLNX_VF 5422 #ifdef CONFIG_ECORE_SRIOV 5423 5424 if (qlnx_vf_device(ha) != 0) { 5425 if (ha->sriov_initialized) 5426 qlnx_inform_vf_link_state(p_hwfn, ha); 5427 } 5428 5429 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5430 #endif /* #ifdef QLNX_VF */ 5431 5432 return; 5433 } 5434 5435 static void 5436 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5437 struct ecore_vf_acquire_sw_info *p_sw_info) 5438 { 5439 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5440 (QLNX_VERSION_MINOR << 16) | 5441 QLNX_VERSION_BUILD; 5442 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5443 5444 return; 5445 } 5446 5447 void 5448 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5449 void *p_sw_info) 5450 { 5451 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5452 5453 return; 5454 } 5455 5456 void 5457 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5458 struct qlnx_link_output *if_link) 5459 { 5460 struct ecore_mcp_link_params link_params; 5461 struct ecore_mcp_link_state link_state; 5462 uint8_t p_change; 5463 struct ecore_ptt *p_ptt = NULL; 5464 5465 memset(if_link, 0, sizeof(*if_link)); 5466 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5467 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5468 5469 ha = (qlnx_host_t *)hwfn->p_dev; 5470 5471 /* Prepare source inputs */ 5472 /* we only deal with physical functions */ 
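/*
 * qlnx_vf_device() appears to return non-zero for a physical function:
 * the PF path acquires a PTT window and queries the management FW
 * directly, while the VF path below reads the bulletin board published
 * by its parent PF.
 */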
5473 if (qlnx_vf_device(ha) != 0) { 5474 p_ptt = ecore_ptt_acquire(hwfn); 5475 5476 if (p_ptt == NULL) { 5477 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5478 return; 5479 } 5480 5481 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5482 ecore_ptt_release(hwfn, p_ptt); 5483 5484 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5485 sizeof(link_params)); 5486 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5487 sizeof(link_state)); 5488 } else { 5489 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5490 ecore_vf_read_bulletin(hwfn, &p_change); 5491 ecore_vf_get_link_params(hwfn, &link_params); 5492 ecore_vf_get_link_state(hwfn, &link_state); 5493 } 5494 5495 /* Set the link parameters to pass to protocol driver */ 5496 if (link_state.link_up) { 5497 if_link->link_up = true; 5498 if_link->speed = link_state.speed; 5499 } 5500 5501 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5502 5503 if (link_params.speed.autoneg) 5504 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5505 5506 if (link_params.pause.autoneg || 5507 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5508 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5509 5510 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5511 link_params.pause.forced_tx) 5512 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5513 5514 if (link_params.speed.advertised_speeds & 5515 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5516 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5517 QLNX_LINK_CAP_1000baseT_Full; 5518 5519 if (link_params.speed.advertised_speeds & 5520 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5521 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5522 5523 if (link_params.speed.advertised_speeds & 5524 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5525 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5526 5527 if (link_params.speed.advertised_speeds & 5528 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5529 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5530 5531 if (link_params.speed.advertised_speeds & 5532 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5533 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5534 5535 if (link_params.speed.advertised_speeds & 5536 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5537 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5538 5539 if_link->advertised_caps = if_link->supported_caps; 5540 5541 if_link->autoneg = link_params.speed.autoneg; 5542 if_link->duplex = QLNX_LINK_DUPLEX; 5543 5544 /* Link partner capabilities */ 5545 5546 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5547 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5548 5549 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5550 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5551 5552 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5553 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5554 5555 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5556 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5557 5558 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5559 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5560 5561 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5562 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5563 5564 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5565 if_link->link_partner_caps |= 
QLNX_LINK_CAP_100000baseKR4_Full; 5566 5567 if (link_state.an_complete) 5568 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5569 5570 if (link_state.partner_adv_pause) 5571 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5572 5573 if ((link_state.partner_adv_pause == 5574 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5575 (link_state.partner_adv_pause == 5576 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5577 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5578 5579 return; 5580 } 5581 5582 void 5583 qlnx_schedule_recovery(void *p_hwfn) 5584 { 5585 qlnx_host_t *ha; 5586 5587 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5588 5589 if (qlnx_vf_device(ha) != 0) { 5590 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5591 } 5592 5593 return; 5594 } 5595 5596 static int 5597 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5598 { 5599 int rc, i; 5600 5601 for (i = 0; i < cdev->num_hwfns; i++) { 5602 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5603 p_hwfn->pf_params = *func_params; 5604 5605 #ifdef QLNX_ENABLE_IWARP 5606 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5607 p_hwfn->using_ll2 = true; 5608 } 5609 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5610 } 5611 5612 rc = ecore_resc_alloc(cdev); 5613 if (rc) 5614 goto qlnx_nic_setup_exit; 5615 5616 ecore_resc_setup(cdev); 5617 5618 qlnx_nic_setup_exit: 5619 5620 return rc; 5621 } 5622 5623 static int 5624 qlnx_nic_start(struct ecore_dev *cdev) 5625 { 5626 int rc; 5627 struct ecore_hw_init_params params; 5628 5629 bzero(&params, sizeof (struct ecore_hw_init_params)); 5630 5631 params.p_tunn = NULL; 5632 params.b_hw_start = true; 5633 params.int_mode = cdev->int_mode; 5634 params.allow_npar_tx_switch = true; 5635 params.bin_fw_data = NULL; 5636 5637 rc = ecore_hw_init(cdev, &params); 5638 if (rc) { 5639 ecore_resc_free(cdev); 5640 return rc; 5641 } 5642 5643 return 0; 5644 } 5645 5646 static int 5647 qlnx_slowpath_start(qlnx_host_t *ha) 5648 { 5649 struct ecore_dev *cdev; 5650 struct ecore_pf_params pf_params; 5651 int rc; 5652 5653 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5654 pf_params.eth_pf_params.num_cons = 5655 (ha->num_rss) * (ha->num_tc + 1); 5656 5657 #ifdef QLNX_ENABLE_IWARP 5658 if (qlnx_vf_device(ha) != 0) { 5659 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5660 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5661 pf_params.rdma_pf_params.num_qps = 1024; 5662 pf_params.rdma_pf_params.num_srqs = 1024; 5663 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5664 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5665 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5666 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5667 pf_params.rdma_pf_params.num_qps = 8192; 5668 pf_params.rdma_pf_params.num_srqs = 8192; 5669 //pf_params.rdma_pf_params.min_dpis = 0; 5670 pf_params.rdma_pf_params.min_dpis = 8; 5671 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5672 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5673 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5674 } 5675 } 5676 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5677 5678 cdev = &ha->cdev; 5679 5680 rc = qlnx_nic_setup(cdev, &pf_params); 5681 if (rc) 5682 goto qlnx_slowpath_start_exit; 5683 5684 cdev->int_mode = ECORE_INT_MODE_MSIX; 5685 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5686 5687 #ifdef QLNX_MAX_COALESCE 5688 cdev->rx_coalesce_usecs = 255; 5689 cdev->tx_coalesce_usecs = 255; 5690 #endif 5691 5692 rc = qlnx_nic_start(cdev);
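	/*
	 * Bring-up order in this function: the PF connection count is sized
	 * first (for example, with num_rss = 4 and num_tc = 1 it would be
	 * 4 * (1 + 1) = 8 connections), qlnx_nic_setup() copies the params
	 * into every hwfn and allocates/sets up ecore resources, the
	 * interrupt and coalescing modes are set on cdev before
	 * qlnx_nic_start() because ecore_hw_init() consumes cdev->int_mode,
	 * and the resulting cdev coalescing values are mirrored into ha
	 * below.
	 */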
5693 5694 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5695 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5696 5697 #ifdef QLNX_USER_LLDP 5698 (void)qlnx_set_lldp_tlvx(ha, NULL); 5699 #endif /* #ifdef QLNX_USER_LLDP */ 5700 5701 qlnx_slowpath_start_exit: 5702 5703 return (rc); 5704 } 5705 5706 static int 5707 qlnx_slowpath_stop(qlnx_host_t *ha) 5708 { 5709 struct ecore_dev *cdev; 5710 device_t dev = ha->pci_dev; 5711 int i; 5712 5713 cdev = &ha->cdev; 5714 5715 ecore_hw_stop(cdev); 5716 5717 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5718 if (ha->sp_handle[i]) 5719 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5720 ha->sp_handle[i]); 5721 5722 ha->sp_handle[i] = NULL; 5723 5724 if (ha->sp_irq[i]) 5725 (void) bus_release_resource(dev, SYS_RES_IRQ, 5726 ha->sp_irq_rid[i], ha->sp_irq[i]); 5727 ha->sp_irq[i] = NULL; 5728 } 5729 5730 ecore_resc_free(cdev); 5731 5732 return 0; 5733 } 5734 5735 static void 5736 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5737 char ver_str[VER_SIZE]) 5738 { 5739 int i; 5740 5741 memcpy(cdev->name, name, NAME_SIZE); 5742 5743 for_each_hwfn(cdev, i) { 5744 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5745 } 5746 5747 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5748 5749 return ; 5750 } 5751 5752 void 5753 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5754 { 5755 enum ecore_mcp_protocol_type type; 5756 union ecore_mcp_protocol_stats *stats; 5757 struct ecore_eth_stats eth_stats; 5758 qlnx_host_t *ha; 5759 5760 ha = cdev; 5761 stats = proto_stats; 5762 type = proto_type; 5763 5764 switch (type) { 5765 case ECORE_MCP_LAN_STATS: 5766 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); 5767 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5768 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5769 stats->lan_stats.fcs_err = -1; 5770 break; 5771 5772 default: 5773 ha->err_get_proto_invalid_type++; 5774 5775 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5776 break; 5777 } 5778 return; 5779 } 5780 5781 static int 5782 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5783 { 5784 struct ecore_hwfn *p_hwfn; 5785 struct ecore_ptt *p_ptt; 5786 5787 p_hwfn = &ha->cdev.hwfns[0]; 5788 p_ptt = ecore_ptt_acquire(p_hwfn); 5789 5790 if (p_ptt == NULL) { 5791 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5792 return (-1); 5793 } 5794 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5795 5796 ecore_ptt_release(p_hwfn, p_ptt); 5797 5798 return (0); 5799 } 5800 5801 static int 5802 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5803 { 5804 struct ecore_hwfn *p_hwfn; 5805 struct ecore_ptt *p_ptt; 5806 5807 p_hwfn = &ha->cdev.hwfns[0]; 5808 p_ptt = ecore_ptt_acquire(p_hwfn); 5809 5810 if (p_ptt == NULL) { 5811 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5812 return (-1); 5813 } 5814 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5815 5816 ecore_ptt_release(p_hwfn, p_ptt); 5817 5818 return (0); 5819 } 5820 5821 static int 5822 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5823 { 5824 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5825 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5826 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5827 5828 return 0; 5829 } 5830 5831 static void 5832 qlnx_init_fp(qlnx_host_t *ha) 5833 { 5834 int rss_id, txq_array_index, tc; 5835 5836 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5837 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5838 5839 fp->rss_id
= rss_id; 5840 fp->edev = ha; 5841 fp->sb_info = &ha->sb_array[rss_id]; 5842 fp->rxq = &ha->rxq_array[rss_id]; 5843 fp->rxq->rxq_id = rss_id; 5844 5845 for (tc = 0; tc < ha->num_tc; tc++) { 5846 txq_array_index = tc * ha->num_rss + rss_id; 5847 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5848 fp->txq[tc]->index = txq_array_index; 5849 } 5850 5851 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5852 rss_id); 5853 5854 fp->tx_ring_full = 0; 5855 5856 /* reset all the statistics counters */ 5857 5858 fp->tx_pkts_processed = 0; 5859 fp->tx_pkts_freed = 0; 5860 fp->tx_pkts_transmitted = 0; 5861 fp->tx_pkts_completed = 0; 5862 5863 #ifdef QLNX_TRACE_PERF_DATA 5864 fp->tx_pkts_trans_ctx = 0; 5865 fp->tx_pkts_compl_ctx = 0; 5866 fp->tx_pkts_trans_fp = 0; 5867 fp->tx_pkts_compl_fp = 0; 5868 fp->tx_pkts_compl_intr = 0; 5869 #endif 5870 fp->tx_lso_wnd_min_len = 0; 5871 fp->tx_defrag = 0; 5872 fp->tx_nsegs_gt_elem_left = 0; 5873 fp->tx_tso_max_nsegs = 0; 5874 fp->tx_tso_min_nsegs = 0; 5875 fp->err_tx_nsegs_gt_elem_left = 0; 5876 fp->err_tx_dmamap_create = 0; 5877 fp->err_tx_defrag_dmamap_load = 0; 5878 fp->err_tx_non_tso_max_seg = 0; 5879 fp->err_tx_dmamap_load = 0; 5880 fp->err_tx_defrag = 0; 5881 fp->err_tx_free_pkt_null = 0; 5882 fp->err_tx_cons_idx_conflict = 0; 5883 5884 fp->rx_pkts = 0; 5885 fp->err_m_getcl = 0; 5886 fp->err_m_getjcl = 0; 5887 } 5888 return; 5889 } 5890 5891 void 5892 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5893 { 5894 struct ecore_dev *cdev; 5895 5896 cdev = &ha->cdev; 5897 5898 if (sb_info->sb_virt) { 5899 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5900 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5901 sb_info->sb_virt = NULL; 5902 } 5903 } 5904 5905 static int 5906 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5907 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5908 { 5909 struct ecore_hwfn *p_hwfn; 5910 int hwfn_index, rc; 5911 u16 rel_sb_id; 5912 5913 hwfn_index = sb_id % cdev->num_hwfns; 5914 p_hwfn = &cdev->hwfns[hwfn_index]; 5915 rel_sb_id = sb_id / cdev->num_hwfns; 5916 5917 QL_DPRINT2(((qlnx_host_t *)cdev), 5918 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 5919 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5920 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5921 sb_virt_addr, (void *)sb_phy_addr); 5922 5923 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5924 sb_virt_addr, sb_phy_addr, rel_sb_id); 5925 5926 return rc; 5927 } 5928 5929 /* This function allocates fast-path status block memory */ 5930 int 5931 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5932 { 5933 struct status_block_e4 *sb_virt; 5934 bus_addr_t sb_phys; 5935 int rc; 5936 uint32_t size; 5937 struct ecore_dev *cdev; 5938 5939 cdev = &ha->cdev; 5940 5941 size = sizeof(*sb_virt); 5942 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5943 5944 if (!sb_virt) { 5945 QL_DPRINT1(ha, "Status block allocation failed\n"); 5946 return -ENOMEM; 5947 } 5948 5949 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5950 if (rc) { 5951 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5952 } 5953 5954 return rc; 5955 } 5956 5957 static void 5958 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5959 { 5960 int i; 5961 struct sw_rx_data *rx_buf; 5962 5963 for (i = 0; i < rxq->num_rx_buffers; i++) { 5964 rx_buf = &rxq->sw_rx_ring[i]; 5965 5966 if (rx_buf->data != NULL) { 5967 if (rx_buf->map != NULL) { 5968 
bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5969 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5970 rx_buf->map = NULL; 5971 } 5972 m_freem(rx_buf->data); 5973 rx_buf->data = NULL; 5974 } 5975 } 5976 return; 5977 } 5978 5979 static void 5980 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5981 { 5982 struct ecore_dev *cdev; 5983 int i; 5984 5985 cdev = &ha->cdev; 5986 5987 qlnx_free_rx_buffers(ha, rxq); 5988 5989 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5990 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5991 if (rxq->tpa_info[i].mpf != NULL) 5992 m_freem(rxq->tpa_info[i].mpf); 5993 } 5994 5995 bzero((void *)&rxq->sw_rx_ring[0], 5996 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5997 5998 /* Free the real RQ ring used by FW */ 5999 if (rxq->rx_bd_ring.p_virt_addr) { 6000 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6001 rxq->rx_bd_ring.p_virt_addr = NULL; 6002 } 6003 6004 /* Free the real completion ring used by FW */ 6005 if (rxq->rx_comp_ring.p_virt_addr && 6006 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6007 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6008 rxq->rx_comp_ring.p_virt_addr = NULL; 6009 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6010 } 6011 6012 #ifdef QLNX_SOFT_LRO 6013 { 6014 struct lro_ctrl *lro; 6015 6016 lro = &rxq->lro; 6017 tcp_lro_free(lro); 6018 } 6019 #endif /* #ifdef QLNX_SOFT_LRO */ 6020 6021 return; 6022 } 6023 6024 static int 6025 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6026 { 6027 register struct mbuf *mp; 6028 uint16_t rx_buf_size; 6029 struct sw_rx_data *sw_rx_data; 6030 struct eth_rx_bd *rx_bd; 6031 dma_addr_t dma_addr; 6032 bus_dmamap_t map; 6033 bus_dma_segment_t segs[1]; 6034 int nsegs; 6035 int ret; 6036 6037 rx_buf_size = rxq->rx_buf_size; 6038 6039 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6040 6041 if (mp == NULL) { 6042 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6043 return -ENOMEM; 6044 } 6045 6046 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6047 6048 map = (bus_dmamap_t)0; 6049 6050 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6051 BUS_DMA_NOWAIT); 6052 dma_addr = segs[0].ds_addr; 6053 6054 if (ret || !dma_addr || (nsegs != 1)) { 6055 m_freem(mp); 6056 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6057 ret, (long long unsigned int)dma_addr, nsegs); 6058 return -ENOMEM; 6059 } 6060 6061 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6062 sw_rx_data->data = mp; 6063 sw_rx_data->dma_addr = dma_addr; 6064 sw_rx_data->map = map; 6065 6066 /* Advance PROD and get BD pointer */ 6067 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6068 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6069 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6070 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6071 6072 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6073 6074 return 0; 6075 } 6076 6077 static int 6078 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6079 struct qlnx_agg_info *tpa) 6080 { 6081 struct mbuf *mp; 6082 dma_addr_t dma_addr; 6083 bus_dmamap_t map; 6084 bus_dma_segment_t segs[1]; 6085 int nsegs; 6086 int ret; 6087 struct sw_rx_data *rx_buf; 6088 6089 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6090 6091 if (mp == NULL) { 6092 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6093 return -ENOMEM; 6094 } 6095 6096 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6097 6098 map = (bus_dmamap_t)0; 6099 6100 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6101 BUS_DMA_NOWAIT); 6102 dma_addr = 
segs[0].ds_addr; 6103 6104 if (ret || !dma_addr || (nsegs != 1)) { 6105 m_freem(mp); 6106 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6107 ret, (long long unsigned int)dma_addr, nsegs); 6108 return -ENOMEM; 6109 } 6110 6111 rx_buf = &tpa->rx_buf; 6112 6113 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6114 6115 rx_buf->data = mp; 6116 rx_buf->dma_addr = dma_addr; 6117 rx_buf->map = map; 6118 6119 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6120 6121 return (0); 6122 } 6123 6124 static void 6125 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6126 { 6127 struct sw_rx_data *rx_buf; 6128 6129 rx_buf = &tpa->rx_buf; 6130 6131 if (rx_buf->data != NULL) { 6132 if (rx_buf->map != NULL) { 6133 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6134 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6135 rx_buf->map = NULL; 6136 } 6137 m_freem(rx_buf->data); 6138 rx_buf->data = NULL; 6139 } 6140 return; 6141 } 6142 6143 /* This function allocates all memory needed per Rx queue */ 6144 static int 6145 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6146 { 6147 int i, rc, num_allocated; 6148 struct ecore_dev *cdev; 6149 6150 cdev = &ha->cdev; 6151 6152 rxq->num_rx_buffers = RX_RING_SIZE; 6153 6154 rxq->rx_buf_size = ha->rx_buf_size; 6155 6156 /* Allocate the parallel driver ring for Rx buffers */ 6157 bzero((void *)&rxq->sw_rx_ring[0], 6158 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6159 6160 /* Allocate FW Rx ring */ 6161 6162 rc = ecore_chain_alloc(cdev, 6163 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6164 ECORE_CHAIN_MODE_NEXT_PTR, 6165 ECORE_CHAIN_CNT_TYPE_U16, 6166 RX_RING_SIZE, 6167 sizeof(struct eth_rx_bd), 6168 &rxq->rx_bd_ring, NULL); 6169 6170 if (rc) 6171 goto err; 6172 6173 /* Allocate FW completion ring */ 6174 rc = ecore_chain_alloc(cdev, 6175 ECORE_CHAIN_USE_TO_CONSUME, 6176 ECORE_CHAIN_MODE_PBL, 6177 ECORE_CHAIN_CNT_TYPE_U16, 6178 RX_RING_SIZE, 6179 sizeof(union eth_rx_cqe), 6180 &rxq->rx_comp_ring, NULL); 6181 6182 if (rc) 6183 goto err; 6184 6185 /* Allocate buffers for the Rx ring */ 6186 6187 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6188 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6189 &rxq->tpa_info[i]); 6190 if (rc) 6191 break; 6192 } 6193 6194 for (i = 0; i < rxq->num_rx_buffers; i++) { 6195 rc = qlnx_alloc_rx_buffer(ha, rxq); 6196 if (rc) 6197 break; 6198 } 6199 num_allocated = i; 6200 if (!num_allocated) { 6201 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6202 goto err; 6203 } else if (num_allocated < rxq->num_rx_buffers) { 6204 QL_DPRINT1(ha, "Allocated less buffers than" 6205 " desired (%d allocated)\n", num_allocated); 6206 } 6207 6208 #ifdef QLNX_SOFT_LRO 6209 6210 { 6211 struct lro_ctrl *lro; 6212 6213 lro = &rxq->lro; 6214 6215 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6216 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6217 rxq->rxq_id); 6218 goto err; 6219 } 6220 6221 lro->ifp = ha->ifp; 6222 } 6223 #endif /* #ifdef QLNX_SOFT_LRO */ 6224 return 0; 6225 6226 err: 6227 qlnx_free_mem_rxq(ha, rxq); 6228 return -ENOMEM; 6229 } 6230 6231 static void 6232 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6233 struct qlnx_tx_queue *txq) 6234 { 6235 struct ecore_dev *cdev; 6236 6237 cdev = &ha->cdev; 6238 6239 bzero((void *)&txq->sw_tx_ring[0], 6240 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6241 6242 /* Free the real RQ ring used by FW */ 6243 if (txq->tx_pbl.p_virt_addr) { 6244 ecore_chain_free(cdev, &txq->tx_pbl); 6245 txq->tx_pbl.p_virt_addr = NULL; 6246 } 6247 return; 6248 } 6249 6250 /* This 
function allocates all memory needed per Tx queue */ 6251 static int 6252 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6253 struct qlnx_tx_queue *txq) 6254 { 6255 int ret = ECORE_SUCCESS; 6256 union eth_tx_bd_types *p_virt; 6257 struct ecore_dev *cdev; 6258 6259 cdev = &ha->cdev; 6260 6261 bzero((void *)&txq->sw_tx_ring[0], 6262 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6263 6264 /* Allocate the real Tx ring to be used by FW */ 6265 ret = ecore_chain_alloc(cdev, 6266 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6267 ECORE_CHAIN_MODE_PBL, 6268 ECORE_CHAIN_CNT_TYPE_U16, 6269 TX_RING_SIZE, 6270 sizeof(*p_virt), 6271 &txq->tx_pbl, NULL); 6272 6273 if (ret != ECORE_SUCCESS) { 6274 goto err; 6275 } 6276 6277 txq->num_tx_buffers = TX_RING_SIZE; 6278 6279 return 0; 6280 6281 err: 6282 qlnx_free_mem_txq(ha, fp, txq); 6283 return -ENOMEM; 6284 } 6285 6286 static void 6287 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6288 { 6289 struct mbuf *mp; 6290 struct ifnet *ifp = ha->ifp; 6291 6292 if (mtx_initialized(&fp->tx_mtx)) { 6293 if (fp->tx_br != NULL) { 6294 mtx_lock(&fp->tx_mtx); 6295 6296 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6297 fp->tx_pkts_freed++; 6298 m_freem(mp); 6299 } 6300 6301 mtx_unlock(&fp->tx_mtx); 6302 6303 buf_ring_free(fp->tx_br, M_DEVBUF); 6304 fp->tx_br = NULL; 6305 } 6306 mtx_destroy(&fp->tx_mtx); 6307 } 6308 return; 6309 } 6310 6311 static void 6312 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6313 { 6314 int tc; 6315 6316 qlnx_free_mem_sb(ha, fp->sb_info); 6317 6318 qlnx_free_mem_rxq(ha, fp->rxq); 6319 6320 for (tc = 0; tc < ha->num_tc; tc++) 6321 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6322 6323 return; 6324 } 6325 6326 static int 6327 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6328 { 6329 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6330 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6331 6332 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6333 6334 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6335 M_NOWAIT, &fp->tx_mtx); 6336 if (fp->tx_br == NULL) { 6337 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6338 ha->dev_unit, fp->rss_id); 6339 return -ENOMEM; 6340 } 6341 return 0; 6342 } 6343 6344 static int 6345 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6346 { 6347 int rc, tc; 6348 6349 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6350 if (rc) 6351 goto err; 6352 6353 if (ha->rx_jumbo_buf_eq_mtu) { 6354 if (ha->max_frame_size <= MCLBYTES) 6355 ha->rx_buf_size = MCLBYTES; 6356 else if (ha->max_frame_size <= MJUMPAGESIZE) 6357 ha->rx_buf_size = MJUMPAGESIZE; 6358 else if (ha->max_frame_size <= MJUM9BYTES) 6359 ha->rx_buf_size = MJUM9BYTES; 6360 else if (ha->max_frame_size <= MJUM16BYTES) 6361 ha->rx_buf_size = MJUM16BYTES; 6362 } else { 6363 if (ha->max_frame_size <= MCLBYTES) 6364 ha->rx_buf_size = MCLBYTES; 6365 else 6366 ha->rx_buf_size = MJUMPAGESIZE; 6367 } 6368 6369 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6370 if (rc) 6371 goto err; 6372 6373 for (tc = 0; tc < ha->num_tc; tc++) { 6374 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6375 if (rc) 6376 goto err; 6377 } 6378 6379 return 0; 6380 6381 err: 6382 qlnx_free_mem_fp(ha, fp); 6383 return -ENOMEM; 6384 } 6385 6386 static void 6387 qlnx_free_mem_load(qlnx_host_t *ha) 6388 { 6389 int i; 6390 6391 for (i = 0; i < ha->num_rss; i++) { 6392 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6393 6394 qlnx_free_mem_fp(ha, fp); 6395 } 6396 return; 6397 } 6398 6399 static int 6400 
qlnx_alloc_mem_load(qlnx_host_t *ha) 6401 { 6402 int rc = 0, rss_id; 6403 6404 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6405 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6406 6407 rc = qlnx_alloc_mem_fp(ha, fp); 6408 if (rc) 6409 break; 6410 } 6411 return (rc); 6412 } 6413 6414 static int 6415 qlnx_start_vport(struct ecore_dev *cdev, 6416 u8 vport_id, 6417 u16 mtu, 6418 u8 drop_ttl0_flg, 6419 u8 inner_vlan_removal_en_flg, 6420 u8 tx_switching, 6421 u8 hw_lro_enable) 6422 { 6423 int rc, i; 6424 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6425 qlnx_host_t *ha __unused; 6426 6427 ha = (qlnx_host_t *)cdev; 6428 6429 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6430 vport_start_params.tx_switching = 0; 6431 vport_start_params.handle_ptp_pkts = 0; 6432 vport_start_params.only_untagged = 0; 6433 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6434 6435 vport_start_params.tpa_mode = 6436 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6437 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6438 6439 vport_start_params.vport_id = vport_id; 6440 vport_start_params.mtu = mtu; 6441 6442 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6443 6444 for_each_hwfn(cdev, i) { 6445 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6446 6447 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6448 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6449 6450 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6451 6452 if (rc) { 6453 QL_DPRINT1(ha, "Failed to start V-PORT %d" 6454 " with MTU %d\n", vport_id, mtu); 6455 return -ENOMEM; 6456 } 6457 6458 ecore_hw_start_fastpath(p_hwfn); 6459 6460 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6461 vport_id, mtu); 6462 } 6463 return 0; 6464 } 6465 6466 static int 6467 qlnx_update_vport(struct ecore_dev *cdev, 6468 struct qlnx_update_vport_params *params) 6469 { 6470 struct ecore_sp_vport_update_params sp_params; 6471 int rc, i, j, fp_index; 6472 struct ecore_hwfn *p_hwfn; 6473 struct ecore_rss_params *rss; 6474 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6475 struct qlnx_fastpath *fp; 6476 6477 memset(&sp_params, 0, sizeof(sp_params)); 6478 /* Translate protocol params into sp params */ 6479 sp_params.vport_id = params->vport_id; 6480 6481 sp_params.update_vport_active_rx_flg = 6482 params->update_vport_active_rx_flg; 6483 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6484 6485 sp_params.update_vport_active_tx_flg = 6486 params->update_vport_active_tx_flg; 6487 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6488 6489 sp_params.update_inner_vlan_removal_flg = 6490 params->update_inner_vlan_removal_flg; 6491 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6492 6493 sp_params.sge_tpa_params = params->sge_tpa_params; 6494 6495 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6496 * We need to re-fix the rss values per engine for CMT.
6497 */ 6498 if (params->rss_params->update_rss_config) 6499 sp_params.rss_params = params->rss_params; 6500 else 6501 sp_params.rss_params = NULL; 6502 6503 for_each_hwfn(cdev, i) { 6504 p_hwfn = &cdev->hwfns[i]; 6505 6506 if ((cdev->num_hwfns > 1) && 6507 params->rss_params->update_rss_config && 6508 params->rss_params->rss_enable) { 6509 rss = params->rss_params; 6510 6511 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6512 fp_index = ((cdev->num_hwfns * j) + i) % 6513 ha->num_rss; 6514 6515 fp = &ha->fp_array[fp_index]; 6516 rss->rss_ind_table[j] = fp->rxq->handle; 6517 } 6518 6519 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6520 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6521 rss->rss_ind_table[j], 6522 rss->rss_ind_table[j+1], 6523 rss->rss_ind_table[j+2], 6524 rss->rss_ind_table[j+3], 6525 rss->rss_ind_table[j+4], 6526 rss->rss_ind_table[j+5], 6527 rss->rss_ind_table[j+6], 6528 rss->rss_ind_table[j+7]); 6529 j += 8; 6530 } 6531 } 6532 6533 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6534 6535 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6536 6537 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6538 ECORE_SPQ_MODE_EBLOCK, NULL); 6539 if (rc) { 6540 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6541 return rc; 6542 } 6543 6544 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6545 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6546 params->vport_id, params->vport_active_tx_flg, 6547 params->vport_active_rx_flg, 6548 params->update_vport_active_tx_flg, 6549 params->update_vport_active_rx_flg); 6550 } 6551 6552 return 0; 6553 } 6554 6555 static void 6556 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6557 { 6558 struct eth_rx_bd *rx_bd_cons = 6559 ecore_chain_consume(&rxq->rx_bd_ring); 6560 struct eth_rx_bd *rx_bd_prod = 6561 ecore_chain_produce(&rxq->rx_bd_ring); 6562 struct sw_rx_data *sw_rx_data_cons = 6563 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6564 struct sw_rx_data *sw_rx_data_prod = 6565 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6566 6567 sw_rx_data_prod->data = sw_rx_data_cons->data; 6568 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6569 6570 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6571 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6572 6573 return; 6574 } 6575 6576 static void 6577 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6578 { 6579 6580 uint16_t bd_prod; 6581 uint16_t cqe_prod; 6582 union { 6583 struct eth_rx_prod_data rx_prod_data; 6584 uint32_t data32; 6585 } rx_prods; 6586 6587 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6588 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6589 6590 /* Update producers */ 6591 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6592 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6593 6594 /* Make sure that the BD and SGE data is updated before updating the 6595 * producers since FW might read the BD/SGE right after the producer 6596 * is updated. 6597 */ 6598 wmb(); 6599 6600 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6601 sizeof(rx_prods), &rx_prods.data32); 6602 6603 /* mmiowb is needed to synchronize doorbell writes from more than one 6604 * processor. It guarantees that the write arrives to the device before 6605 * the napi lock is released and another qlnx_poll is called (possibly 6606 * on another CPU). Without this barrier, the next doorbell can bypass 6607 * this doorbell. This is applicable to IA64/Altix systems. 
6608 */ 6609 wmb(); 6610 6611 return; 6612 } 6613 6614 static uint32_t qlnx_hash_key[] = { 6615 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6616 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6617 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6618 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6619 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6620 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6621 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6622 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6623 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6624 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6625 6626 static int 6627 qlnx_start_queues(qlnx_host_t *ha) 6628 { 6629 int rc, tc, i, vport_id = 0, 6630 drop_ttl0_flg = 1, vlan_removal_en = 1, 6631 tx_switching = 0, hw_lro_enable = 0; 6632 struct ecore_dev *cdev = &ha->cdev; 6633 struct ecore_rss_params *rss_params = &ha->rss_params; 6634 struct qlnx_update_vport_params vport_update_params; 6635 struct ifnet *ifp; 6636 struct ecore_hwfn *p_hwfn; 6637 struct ecore_sge_tpa_params tpa_params; 6638 struct ecore_queue_start_common_params qparams; 6639 struct qlnx_fastpath *fp; 6640 6641 ifp = ha->ifp; 6642 6643 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6644 6645 if (!ha->num_rss) { 6646 QL_DPRINT1(ha, "Cannot update V-PORT to active as there" 6647 " are no Rx queues\n"); 6648 return -EINVAL; 6649 } 6650 6651 #ifndef QLNX_SOFT_LRO 6652 hw_lro_enable = ifp->if_capenable & IFCAP_LRO; 6653 #endif /* #ifndef QLNX_SOFT_LRO */ 6654 6655 rc = qlnx_start_vport(cdev, vport_id, ifp->if_mtu, drop_ttl0_flg, 6656 vlan_removal_en, tx_switching, hw_lro_enable); 6657 6658 if (rc) { 6659 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6660 return rc; 6661 } 6662 6663 QL_DPRINT2(ha, "Start vport ramrod passed, " 6664 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6665 vport_id, (int)(ifp->if_mtu + 0xe), vlan_removal_en); 6666 6667 for_each_rss(i) { 6668 struct ecore_rxq_start_ret_params rx_ret_params; 6669 struct ecore_txq_start_ret_params tx_ret_params; 6670 6671 fp = &ha->fp_array[i]; 6672 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6673 6674 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6675 bzero(&rx_ret_params, 6676 sizeof (struct ecore_rxq_start_ret_params)); 6677 6678 qparams.queue_id = i; 6679 qparams.vport_id = vport_id; 6680 qparams.stats_id = vport_id; 6681 qparams.p_sb = fp->sb_info; 6682 qparams.sb_idx = RX_PI; 6683 6684 6685 rc = ecore_eth_rx_queue_start(p_hwfn, 6686 p_hwfn->hw_info.opaque_fid, 6687 &qparams, 6688 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6689 /* bd_chain_phys_addr */ 6690 fp->rxq->rx_bd_ring.p_phys_addr, 6691 /* cqe_pbl_addr */ 6692 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6693 /* cqe_pbl_size */ 6694 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6695 &rx_ret_params); 6696 6697 if (rc) { 6698 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6699 return rc; 6700 } 6701 6702 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6703 fp->rxq->handle = rx_ret_params.p_handle; 6704 fp->rxq->hw_cons_ptr = 6705 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6706 6707 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6708 6709 for (tc = 0; tc < ha->num_tc; tc++) { 6710 struct qlnx_tx_queue *txq = fp->txq[tc]; 6711 6712 bzero(&qparams, 6713 sizeof(struct ecore_queue_start_common_params)); 6714 bzero(&tx_ret_params, 6715 sizeof (struct ecore_txq_start_ret_params)); 6716 6717 qparams.queue_id = txq->index / cdev->num_hwfns; 6718 qparams.vport_id = vport_id; 6719 qparams.stats_id = vport_id; 6720 qparams.p_sb =
fp->sb_info; 6721 qparams.sb_idx = TX_PI(tc); 6722 6723 rc = ecore_eth_tx_queue_start(p_hwfn, 6724 p_hwfn->hw_info.opaque_fid, 6725 &qparams, tc, 6726 /* bd_chain_phys_addr */ 6727 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6728 ecore_chain_get_page_cnt(&txq->tx_pbl), 6729 &tx_ret_params); 6730 6731 if (rc) { 6732 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6733 txq->index, rc); 6734 return rc; 6735 } 6736 6737 txq->doorbell_addr = tx_ret_params.p_doorbell; 6738 txq->handle = tx_ret_params.p_handle; 6739 6740 txq->hw_cons_ptr = 6741 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6742 SET_FIELD(txq->tx_db.data.params, 6743 ETH_DB_DATA_DEST, DB_DEST_XCM); 6744 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6745 DB_AGG_CMD_SET); 6746 SET_FIELD(txq->tx_db.data.params, 6747 ETH_DB_DATA_AGG_VAL_SEL, 6748 DQ_XCM_ETH_TX_BD_PROD_CMD); 6749 6750 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6751 } 6752 } 6753 6754 /* Fill struct with RSS params */ 6755 if (ha->num_rss > 1) { 6756 rss_params->update_rss_config = 1; 6757 rss_params->rss_enable = 1; 6758 rss_params->update_rss_capabilities = 1; 6759 rss_params->update_rss_ind_table = 1; 6760 rss_params->update_rss_key = 1; 6761 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6762 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6763 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6764 6765 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6766 fp = &ha->fp_array[(i % ha->num_rss)]; 6767 rss_params->rss_ind_table[i] = fp->rxq->handle; 6768 } 6769 6770 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6771 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6772 6773 } else { 6774 memset(rss_params, 0, sizeof(*rss_params)); 6775 } 6776 6777 /* Prepare and send the vport enable */ 6778 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6779 vport_update_params.vport_id = vport_id; 6780 vport_update_params.update_vport_active_tx_flg = 1; 6781 vport_update_params.vport_active_tx_flg = 1; 6782 vport_update_params.update_vport_active_rx_flg = 1; 6783 vport_update_params.vport_active_rx_flg = 1; 6784 vport_update_params.rss_params = rss_params; 6785 vport_update_params.update_inner_vlan_removal_flg = 1; 6786 vport_update_params.inner_vlan_removal_flg = 1; 6787 6788 if (hw_lro_enable) { 6789 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6790 6791 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6792 6793 tpa_params.update_tpa_en_flg = 1; 6794 tpa_params.tpa_ipv4_en_flg = 1; 6795 tpa_params.tpa_ipv6_en_flg = 1; 6796 6797 tpa_params.update_tpa_param_flg = 1; 6798 tpa_params.tpa_pkt_split_flg = 0; 6799 tpa_params.tpa_hdr_data_split_flg = 0; 6800 tpa_params.tpa_gro_consistent_flg = 0; 6801 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6802 tpa_params.tpa_max_size = (uint16_t)(-1); 6803 tpa_params.tpa_min_size_to_start = ifp->if_mtu/2; 6804 tpa_params.tpa_min_size_to_cont = ifp->if_mtu/2; 6805 6806 vport_update_params.sge_tpa_params = &tpa_params; 6807 } 6808 6809 rc = qlnx_update_vport(cdev, &vport_update_params); 6810 if (rc) { 6811 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6812 return rc; 6813 } 6814 6815 return 0; 6816 } 6817 6818 static int 6819 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6820 struct qlnx_tx_queue *txq) 6821 { 6822 uint16_t hw_bd_cons; 6823 uint16_t ecore_cons_idx; 6824 6825 QL_DPRINT2(ha, "enter\n"); 6826 6827 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6828 6829 while (hw_bd_cons != 6830 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6831 mtx_lock(&fp->tx_mtx); 6832 
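/*
 * Reap any completed Tx BDs under the per-fastpath lock, then back off for
 * a couple of milliseconds and re-read the hardware consumer index; the
 * loop above exits once it matches the chain consumer, i.e. once the Tx
 * queue has fully drained.
 */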
6833 (void)qlnx_tx_int(ha, fp, txq); 6834 6835 mtx_unlock(&fp->tx_mtx); 6836 6837 qlnx_mdelay(__func__, 2); 6838 6839 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6840 } 6841 6842 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6843 6844 return 0; 6845 } 6846 6847 static int 6848 qlnx_stop_queues(qlnx_host_t *ha) 6849 { 6850 struct qlnx_update_vport_params vport_update_params; 6851 struct ecore_dev *cdev; 6852 struct qlnx_fastpath *fp; 6853 int rc, tc, i; 6854 6855 cdev = &ha->cdev; 6856 6857 /* Disable the vport */ 6858 6859 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6860 6861 vport_update_params.vport_id = 0; 6862 vport_update_params.update_vport_active_tx_flg = 1; 6863 vport_update_params.vport_active_tx_flg = 0; 6864 vport_update_params.update_vport_active_rx_flg = 1; 6865 vport_update_params.vport_active_rx_flg = 0; 6866 vport_update_params.rss_params = &ha->rss_params; 6867 vport_update_params.rss_params->update_rss_config = 0; 6868 vport_update_params.rss_params->rss_enable = 0; 6869 vport_update_params.update_inner_vlan_removal_flg = 0; 6870 vport_update_params.inner_vlan_removal_flg = 0; 6871 6872 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6873 6874 rc = qlnx_update_vport(cdev, &vport_update_params); 6875 if (rc) { 6876 QL_DPRINT1(ha, "Failed to update vport\n"); 6877 return rc; 6878 } 6879 6880 /* Flush Tx queues. If needed, request drain from MCP */ 6881 for_each_rss(i) { 6882 fp = &ha->fp_array[i]; 6883 6884 for (tc = 0; tc < ha->num_tc; tc++) { 6885 struct qlnx_tx_queue *txq = fp->txq[tc]; 6886 6887 rc = qlnx_drain_txq(ha, fp, txq); 6888 if (rc) 6889 return rc; 6890 } 6891 } 6892 6893 /* Stop all Queues in reverse order*/ 6894 for (i = ha->num_rss - 1; i >= 0; i--) { 6895 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6896 6897 fp = &ha->fp_array[i]; 6898 6899 /* Stop the Tx Queue(s)*/ 6900 for (tc = 0; tc < ha->num_tc; tc++) { 6901 int tx_queue_id __unused; 6902 6903 tx_queue_id = tc * ha->num_rss + i; 6904 rc = ecore_eth_tx_queue_stop(p_hwfn, 6905 fp->txq[tc]->handle); 6906 6907 if (rc) { 6908 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 6909 tx_queue_id); 6910 return rc; 6911 } 6912 } 6913 6914 /* Stop the Rx Queue*/ 6915 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6916 false); 6917 if (rc) { 6918 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 6919 return rc; 6920 } 6921 } 6922 6923 /* Stop the vport */ 6924 for_each_hwfn(cdev, i) { 6925 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6926 6927 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6928 6929 if (rc) { 6930 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 6931 return rc; 6932 } 6933 } 6934 6935 return rc; 6936 } 6937 6938 static int 6939 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6940 enum ecore_filter_opcode opcode, 6941 unsigned char mac[ETH_ALEN]) 6942 { 6943 struct ecore_filter_ucast ucast; 6944 struct ecore_dev *cdev; 6945 int rc; 6946 6947 cdev = &ha->cdev; 6948 6949 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6950 6951 ucast.opcode = opcode; 6952 ucast.type = ECORE_FILTER_MAC; 6953 ucast.is_rx_filter = 1; 6954 ucast.vport_to_add_to = 0; 6955 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6956 6957 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6958 6959 return (rc); 6960 } 6961 6962 static int 6963 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6964 { 6965 struct ecore_filter_ucast ucast; 6966 struct ecore_dev *cdev; 6967 int rc; 6968 6969 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6970 6971 
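/*
 * With the structure zeroed, the REPLACE opcode below carries an all-zero
 * MAC; the intent (per the function name) is to have the firmware drop the
 * previously installed unicast Rx MAC filters. qlnx_clean_filters() follows
 * this up with an explicit ECORE_FILTER_FLUSH on the primary MAC.
 */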
ucast.opcode = ECORE_FILTER_REPLACE; 6972 ucast.type = ECORE_FILTER_MAC; 6973 ucast.is_rx_filter = 1; 6974 6975 cdev = &ha->cdev; 6976 6977 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6978 6979 return (rc); 6980 } 6981 6982 static int 6983 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6984 { 6985 struct ecore_filter_mcast *mcast; 6986 struct ecore_dev *cdev; 6987 int rc, i; 6988 6989 cdev = &ha->cdev; 6990 6991 mcast = &ha->ecore_mcast; 6992 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6993 6994 mcast->opcode = ECORE_FILTER_REMOVE; 6995 6996 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 6997 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 6998 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 6999 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7000 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7001 mcast->num_mc_addrs++; 7002 } 7003 } 7004 mcast = &ha->ecore_mcast; 7005 7006 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7007 7008 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7009 ha->nmcast = 0; 7010 7011 return (rc); 7012 } 7013 7014 static int 7015 qlnx_clean_filters(qlnx_host_t *ha) 7016 { 7017 int rc = 0; 7018 7019 /* Remove all unicast macs */ 7020 rc = qlnx_remove_all_ucast_mac(ha); 7021 if (rc) 7022 return rc; 7023 7024 /* Remove all multicast macs */ 7025 rc = qlnx_remove_all_mcast_mac(ha); 7026 if (rc) 7027 return rc; 7028 7029 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7030 7031 return (rc); 7032 } 7033 7034 static int 7035 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7036 { 7037 struct ecore_filter_accept_flags accept; 7038 int rc = 0; 7039 struct ecore_dev *cdev; 7040 7041 cdev = &ha->cdev; 7042 7043 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7044 7045 accept.update_rx_mode_config = 1; 7046 accept.rx_accept_filter = filter; 7047 7048 accept.update_tx_mode_config = 1; 7049 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7050 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7051 7052 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7053 ECORE_SPQ_MODE_CB, NULL); 7054 7055 return (rc); 7056 } 7057 7058 static int 7059 qlnx_set_rx_mode(qlnx_host_t *ha) 7060 { 7061 int rc = 0; 7062 uint8_t filter; 7063 7064 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7065 if (rc) 7066 return rc; 7067 7068 rc = qlnx_remove_all_mcast_mac(ha); 7069 if (rc) 7070 return rc; 7071 7072 filter = ECORE_ACCEPT_UCAST_MATCHED | 7073 ECORE_ACCEPT_MCAST_MATCHED | 7074 ECORE_ACCEPT_BCAST; 7075 7076 if (qlnx_vf_device(ha) == 0) { 7077 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7078 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7079 } 7080 ha->filter = filter; 7081 7082 rc = qlnx_set_rx_accept_filter(ha, filter); 7083 7084 return (rc); 7085 } 7086 7087 static int 7088 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7089 { 7090 int i, rc = 0; 7091 struct ecore_dev *cdev; 7092 struct ecore_hwfn *hwfn; 7093 struct ecore_ptt *ptt; 7094 7095 if (qlnx_vf_device(ha) == 0) 7096 return (0); 7097 7098 cdev = &ha->cdev; 7099 7100 for_each_hwfn(cdev, i) { 7101 hwfn = &cdev->hwfns[i]; 7102 7103 ptt = ecore_ptt_acquire(hwfn); 7104 if (!ptt) 7105 return -EBUSY; 7106 7107 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7108 7109 ecore_ptt_release(hwfn, ptt); 7110 7111 if (rc) 7112 return rc; 7113 } 7114 return (rc); 7115 } 7116 7117 static uint64_t 7118 qlnx_get_counter(if_t ifp, ift_counter cnt) 7119 { 7120 qlnx_host_t *ha; 7121 uint64_t count; 7122 
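/*
 * The counters below are derived from the cached vport hardware statistics
 * that qlnx_timer() refreshes roughly once per second via
 * ecore_get_vport_stats(), so the values reported to the stack can lag the
 * hardware by up to one callout period.
 */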
7123 ha = (qlnx_host_t *)if_getsoftc(ifp); 7124 7125 switch (cnt) { 7126 case IFCOUNTER_IPACKETS: 7127 count = ha->hw_stats.common.rx_ucast_pkts + 7128 ha->hw_stats.common.rx_mcast_pkts + 7129 ha->hw_stats.common.rx_bcast_pkts; 7130 break; 7131 7132 case IFCOUNTER_IERRORS: 7133 count = ha->hw_stats.common.rx_crc_errors + 7134 ha->hw_stats.common.rx_align_errors + 7135 ha->hw_stats.common.rx_oversize_packets + 7136 ha->hw_stats.common.rx_undersize_packets; 7137 break; 7138 7139 case IFCOUNTER_OPACKETS: 7140 count = ha->hw_stats.common.tx_ucast_pkts + 7141 ha->hw_stats.common.tx_mcast_pkts + 7142 ha->hw_stats.common.tx_bcast_pkts; 7143 break; 7144 7145 case IFCOUNTER_OERRORS: 7146 count = ha->hw_stats.common.tx_err_drop_pkts; 7147 break; 7148 7149 case IFCOUNTER_COLLISIONS: 7150 return (0); 7151 7152 case IFCOUNTER_IBYTES: 7153 count = ha->hw_stats.common.rx_ucast_bytes + 7154 ha->hw_stats.common.rx_mcast_bytes + 7155 ha->hw_stats.common.rx_bcast_bytes; 7156 break; 7157 7158 case IFCOUNTER_OBYTES: 7159 count = ha->hw_stats.common.tx_ucast_bytes + 7160 ha->hw_stats.common.tx_mcast_bytes + 7161 ha->hw_stats.common.tx_bcast_bytes; 7162 break; 7163 7164 case IFCOUNTER_IMCASTS: 7165 count = ha->hw_stats.common.rx_mcast_pkts; 7166 break; 7167 7168 case IFCOUNTER_OMCASTS: 7169 count = ha->hw_stats.common.tx_mcast_pkts; 7170 break; 7171 7172 case IFCOUNTER_IQDROPS: 7173 case IFCOUNTER_OQDROPS: 7174 case IFCOUNTER_NOPROTO: 7175 7176 default: 7177 return (if_get_counter_default(ifp, cnt)); 7178 } 7179 return (count); 7180 } 7181 7182 static void 7183 qlnx_timer(void *arg) 7184 { 7185 qlnx_host_t *ha; 7186 7187 ha = (qlnx_host_t *)arg; 7188 7189 if (ha->error_recovery) { 7190 ha->error_recovery = 0; 7191 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7192 return; 7193 } 7194 7195 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7196 7197 if (ha->storm_stats_gather) 7198 qlnx_sample_storm_stats(ha); 7199 7200 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7201 7202 return; 7203 } 7204 7205 static int 7206 qlnx_load(qlnx_host_t *ha) 7207 { 7208 int i; 7209 int rc = 0; 7210 device_t dev; 7211 7212 dev = ha->pci_dev; 7213 7214 QL_DPRINT2(ha, "enter\n"); 7215 7216 rc = qlnx_alloc_mem_arrays(ha); 7217 if (rc) 7218 goto qlnx_load_exit0; 7219 7220 qlnx_init_fp(ha); 7221 7222 rc = qlnx_alloc_mem_load(ha); 7223 if (rc) 7224 goto qlnx_load_exit1; 7225 7226 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7227 ha->num_rss, ha->num_tc); 7228 7229 for (i = 0; i < ha->num_rss; i++) { 7230 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7231 (INTR_TYPE_NET | INTR_MPSAFE), 7232 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7233 &ha->irq_vec[i].handle))) { 7234 QL_DPRINT1(ha, "could not setup interrupt\n"); 7235 goto qlnx_load_exit2; 7236 } 7237 7238 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 7239 irq %p handle %p\n", i, 7240 ha->irq_vec[i].irq_rid, 7241 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7242 7243 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7244 } 7245 7246 rc = qlnx_start_queues(ha); 7247 if (rc) 7248 goto qlnx_load_exit2; 7249 7250 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); 7251 7252 /* Add primary mac and set Rx filters */ 7253 rc = qlnx_set_rx_mode(ha); 7254 if (rc) 7255 goto qlnx_load_exit2; 7256 7257 /* Ask for link-up using current configuration */ 7258 qlnx_set_link(ha, true); 7259 7260 if (qlnx_vf_device(ha) == 0) 7261 qlnx_link_update(&ha->cdev.hwfns[0]); 7262 7263 ha->state = QLNX_STATE_OPEN; 7264 7265 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7266
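/*
 * Statistics start from a clean slate on every load; the callout armed
 * below has qlnx_timer() repopulate them once per second until
 * qlnx_unload() drains the callout.
 */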
7267 if (ha->flags.callout_init) 7268 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7269 7270 goto qlnx_load_exit0; 7271 7272 qlnx_load_exit2: 7273 qlnx_free_mem_load(ha); 7274 7275 qlnx_load_exit1: 7276 ha->num_rss = 0; 7277 7278 qlnx_load_exit0: 7279 QL_DPRINT2(ha, "exit [%d]\n", rc); 7280 return rc; 7281 } 7282 7283 static void 7284 qlnx_drain_soft_lro(qlnx_host_t *ha) 7285 { 7286 #ifdef QLNX_SOFT_LRO 7287 7288 struct ifnet *ifp; 7289 int i; 7290 7291 ifp = ha->ifp; 7292 7293 if (ifp->if_capenable & IFCAP_LRO) { 7294 for (i = 0; i < ha->num_rss; i++) { 7295 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7296 struct lro_ctrl *lro; 7297 7298 lro = &fp->rxq->lro; 7299 7300 tcp_lro_flush_all(lro); 7301 } 7302 } 7303 7304 #endif /* #ifdef QLNX_SOFT_LRO */ 7305 7306 return; 7307 } 7308 7309 static void 7310 qlnx_unload(qlnx_host_t *ha) 7311 { 7312 struct ecore_dev *cdev; 7313 device_t dev; 7314 int i; 7315 7316 cdev = &ha->cdev; 7317 dev = ha->pci_dev; 7318 7319 QL_DPRINT2(ha, "enter\n"); 7320 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7321 7322 if (ha->state == QLNX_STATE_OPEN) { 7323 qlnx_set_link(ha, false); 7324 qlnx_clean_filters(ha); 7325 qlnx_stop_queues(ha); 7326 ecore_hw_stop_fastpath(cdev); 7327 7328 for (i = 0; i < ha->num_rss; i++) { 7329 if (ha->irq_vec[i].handle) { 7330 (void)bus_teardown_intr(dev, 7331 ha->irq_vec[i].irq, 7332 ha->irq_vec[i].handle); 7333 ha->irq_vec[i].handle = NULL; 7334 } 7335 } 7336 7337 qlnx_drain_fp_taskqueues(ha); 7338 qlnx_drain_soft_lro(ha); 7339 qlnx_free_mem_load(ha); 7340 } 7341 7342 if (ha->flags.callout_init) 7343 callout_drain(&ha->qlnx_callout); 7344 7345 qlnx_mdelay(__func__, 1000); 7346 7347 ha->state = QLNX_STATE_CLOSED; 7348 7349 QL_DPRINT2(ha, "exit\n"); 7350 return; 7351 } 7352 7353 static int 7354 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7355 { 7356 int rval = -1; 7357 struct ecore_hwfn *p_hwfn; 7358 struct ecore_ptt *p_ptt; 7359 7360 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7361 7362 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7363 p_ptt = ecore_ptt_acquire(p_hwfn); 7364 7365 if (!p_ptt) { 7366 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7367 return (rval); 7368 } 7369 7370 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7371 7372 if (rval == DBG_STATUS_OK) 7373 rval = 0; 7374 else { 7375 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7376 "[0x%x]\n", rval); 7377 } 7378 7379 ecore_ptt_release(p_hwfn, p_ptt); 7380 7381 return (rval); 7382 } 7383 7384 static int 7385 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7386 { 7387 int rval = -1; 7388 struct ecore_hwfn *p_hwfn; 7389 struct ecore_ptt *p_ptt; 7390 7391 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7392 7393 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7394 p_ptt = ecore_ptt_acquire(p_hwfn); 7395 7396 if (!p_ptt) { 7397 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7398 return (rval); 7399 } 7400 7401 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7402 7403 if (rval == DBG_STATUS_OK) 7404 rval = 0; 7405 else { 7406 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7407 " [0x%x]\n", rval); 7408 } 7409 7410 ecore_ptt_release(p_hwfn, p_ptt); 7411 7412 return (rval); 7413 } 7414 7415 static void 7416 qlnx_sample_storm_stats(qlnx_host_t *ha) 7417 { 7418 int i, index; 7419 struct ecore_dev *cdev; 7420 qlnx_storm_stats_t *s_stats; 7421 uint32_t reg; 7422 struct ecore_ptt *p_ptt; 7423 struct ecore_hwfn *hwfn; 7424 7425 if (ha->storm_stats_index >= 
QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7426 ha->storm_stats_gather = 0; 7427 return; 7428 } 7429 7430 cdev = &ha->cdev; 7431 7432 for_each_hwfn(cdev, i) { 7433 hwfn = &cdev->hwfns[i]; 7434 7435 p_ptt = ecore_ptt_acquire(hwfn); 7436 if (!p_ptt) 7437 return; 7438 7439 index = ha->storm_stats_index + 7440 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7441 7442 s_stats = &ha->storm_stats[index]; 7443 7444 /* XSTORM */ 7445 reg = XSEM_REG_FAST_MEMORY + 7446 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7447 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7448 7449 reg = XSEM_REG_FAST_MEMORY + 7450 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7451 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7452 7453 reg = XSEM_REG_FAST_MEMORY + 7454 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7455 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7456 7457 reg = XSEM_REG_FAST_MEMORY + 7458 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7459 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7460 7461 /* YSTORM */ 7462 reg = YSEM_REG_FAST_MEMORY + 7463 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7464 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7465 7466 reg = YSEM_REG_FAST_MEMORY + 7467 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7468 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7469 7470 reg = YSEM_REG_FAST_MEMORY + 7471 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7472 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7473 7474 reg = YSEM_REG_FAST_MEMORY + 7475 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7476 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7477 7478 /* PSTORM */ 7479 reg = PSEM_REG_FAST_MEMORY + 7480 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7481 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7482 7483 reg = PSEM_REG_FAST_MEMORY + 7484 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7485 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7486 7487 reg = PSEM_REG_FAST_MEMORY + 7488 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7489 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7490 7491 reg = PSEM_REG_FAST_MEMORY + 7492 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7493 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7494 7495 /* TSTORM */ 7496 reg = TSEM_REG_FAST_MEMORY + 7497 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7498 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7499 7500 reg = TSEM_REG_FAST_MEMORY + 7501 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7502 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7503 7504 reg = TSEM_REG_FAST_MEMORY + 7505 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7506 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7507 7508 reg = TSEM_REG_FAST_MEMORY + 7509 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7510 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7511 7512 /* MSTORM */ 7513 reg = MSEM_REG_FAST_MEMORY + 7514 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7515 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7516 7517 reg = MSEM_REG_FAST_MEMORY + 7518 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7519 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7520 7521 reg = MSEM_REG_FAST_MEMORY + 7522 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7523 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7524 7525 reg = MSEM_REG_FAST_MEMORY + 7526 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7527 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7528 7529 /* USTORM */ 7530 reg = USEM_REG_FAST_MEMORY + 7531 
SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7532 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7533 7534 reg = USEM_REG_FAST_MEMORY + 7535 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7536 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7537 7538 reg = USEM_REG_FAST_MEMORY + 7539 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7540 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7541 7542 reg = USEM_REG_FAST_MEMORY + 7543 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7544 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7545 7546 ecore_ptt_release(hwfn, p_ptt); 7547 } 7548 7549 ha->storm_stats_index++; 7550 7551 return; 7552 } 7553 7554 /* 7555 * Name: qlnx_dump_buf8 7556 * Function: dumps a buffer as bytes 7557 */ 7558 static void 7559 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7560 { 7561 device_t dev; 7562 uint32_t i = 0; 7563 uint8_t *buf; 7564 7565 dev = ha->pci_dev; 7566 buf = dbuf; 7567 7568 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7569 7570 while (len >= 16) { 7571 device_printf(dev,"0x%08x:" 7572 " %02x %02x %02x %02x %02x %02x %02x %02x" 7573 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7574 buf[0], buf[1], buf[2], buf[3], 7575 buf[4], buf[5], buf[6], buf[7], 7576 buf[8], buf[9], buf[10], buf[11], 7577 buf[12], buf[13], buf[14], buf[15]); 7578 i += 16; 7579 len -= 16; 7580 buf += 16; 7581 } 7582 switch (len) { 7583 case 1: 7584 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7585 break; 7586 case 2: 7587 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7588 break; 7589 case 3: 7590 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7591 i, buf[0], buf[1], buf[2]); 7592 break; 7593 case 4: 7594 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7595 buf[0], buf[1], buf[2], buf[3]); 7596 break; 7597 case 5: 7598 device_printf(dev,"0x%08x:" 7599 " %02x %02x %02x %02x %02x\n", i, 7600 buf[0], buf[1], buf[2], buf[3], buf[4]); 7601 break; 7602 case 6: 7603 device_printf(dev,"0x%08x:" 7604 " %02x %02x %02x %02x %02x %02x\n", i, 7605 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7606 break; 7607 case 7: 7608 device_printf(dev,"0x%08x:" 7609 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7610 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7611 break; 7612 case 8: 7613 device_printf(dev,"0x%08x:" 7614 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7615 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7616 buf[7]); 7617 break; 7618 case 9: 7619 device_printf(dev,"0x%08x:" 7620 " %02x %02x %02x %02x %02x %02x %02x %02x" 7621 " %02x\n", i, 7622 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7623 buf[7], buf[8]); 7624 break; 7625 case 10: 7626 device_printf(dev,"0x%08x:" 7627 " %02x %02x %02x %02x %02x %02x %02x %02x" 7628 " %02x %02x\n", i, 7629 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7630 buf[7], buf[8], buf[9]); 7631 break; 7632 case 11: 7633 device_printf(dev,"0x%08x:" 7634 " %02x %02x %02x %02x %02x %02x %02x %02x" 7635 " %02x %02x %02x\n", i, 7636 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7637 buf[7], buf[8], buf[9], buf[10]); 7638 break; 7639 case 12: 7640 device_printf(dev,"0x%08x:" 7641 " %02x %02x %02x %02x %02x %02x %02x %02x" 7642 " %02x %02x %02x %02x\n", i, 7643 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7644 buf[7], buf[8], buf[9], buf[10], buf[11]); 7645 break; 7646 case 13: 7647 device_printf(dev,"0x%08x:" 7648 " %02x %02x %02x %02x %02x %02x %02x %02x" 7649 " %02x %02x %02x %02x %02x\n", i, 7650 buf[0], 
buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7651 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7652 break; 7653 case 14: 7654 device_printf(dev,"0x%08x:" 7655 " %02x %02x %02x %02x %02x %02x %02x %02x" 7656 " %02x %02x %02x %02x %02x %02x\n", i, 7657 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7658 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7659 buf[13]); 7660 break; 7661 case 15: 7662 device_printf(dev,"0x%08x:" 7663 " %02x %02x %02x %02x %02x %02x %02x %02x" 7664 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7665 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7666 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7667 buf[13], buf[14]); 7668 break; 7669 default: 7670 break; 7671 } 7672 7673 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7674 7675 return; 7676 } 7677 7678 #ifdef CONFIG_ECORE_SRIOV 7679 7680 static void 7681 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7682 { 7683 struct ecore_public_vf_info *vf_info; 7684 7685 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7686 7687 if (!vf_info) 7688 return; 7689 7690 /* Clear the VF mac */ 7691 memset(vf_info->forced_mac, 0, ETH_ALEN); 7692 7693 vf_info->forced_vlan = 0; 7694 7695 return; 7696 } 7697 7698 void 7699 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7700 { 7701 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7702 return; 7703 } 7704 7705 static int 7706 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7707 struct ecore_filter_ucast *params) 7708 { 7709 struct ecore_public_vf_info *vf; 7710 7711 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 7712 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 7713 "VF[%d] vport not initialized\n", vfid); 7714 return ECORE_INVAL; 7715 } 7716 7717 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 7718 if (!vf) 7719 return -EINVAL; 7720 7721 /* No real decision to make; Store the configured MAC */ 7722 if (params->type == ECORE_FILTER_MAC || 7723 params->type == ECORE_FILTER_MAC_VLAN) 7724 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 7725 7726 return 0; 7727 } 7728 7729 int 7730 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 7731 { 7732 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 7733 } 7734 7735 static int 7736 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 7737 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 7738 { 7739 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 7740 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 7741 "VF[%d] vport not initialized\n", vfid); 7742 return ECORE_INVAL; 7743 } 7744 7745 /* Untrusted VFs can't even be trusted to know that fact. 7746 * Simply indicate everything is configured fine, and trace 7747 * configuration 'behind their back'. 
7748 */ 7749 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) 7750 return 0; 7751 7752 return 0; 7753 7754 } 7755 int 7756 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) 7757 { 7758 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); 7759 } 7760 7761 static int 7762 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) 7763 { 7764 int i; 7765 struct ecore_dev *cdev; 7766 7767 cdev = p_hwfn->p_dev; 7768 7769 for (i = 0; i < cdev->num_hwfns; i++) { 7770 if (&cdev->hwfns[i] == p_hwfn) 7771 break; 7772 } 7773 7774 if (i >= cdev->num_hwfns) 7775 return (-1); 7776 7777 return (i); 7778 } 7779 7780 static int 7781 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) 7782 { 7783 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7784 int i; 7785 7786 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", 7787 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); 7788 7789 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7790 return (-1); 7791 7792 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7793 atomic_testandset_32(&ha->sriov_task[i].flags, 7794 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); 7795 7796 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7797 &ha->sriov_task[i].pf_task); 7798 } 7799 7800 return (ECORE_SUCCESS); 7801 } 7802 7803 int 7804 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) 7805 { 7806 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); 7807 } 7808 7809 static void 7810 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) 7811 { 7812 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7813 int i; 7814 7815 if (!ha->sriov_initialized) 7816 return; 7817 7818 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7819 ha, p_hwfn->p_dev, p_hwfn); 7820 7821 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7822 return; 7823 7824 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7825 atomic_testandset_32(&ha->sriov_task[i].flags, 7826 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); 7827 7828 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7829 &ha->sriov_task[i].pf_task); 7830 } 7831 7832 return; 7833 } 7834 7835 void 7836 qlnx_vf_flr_update(void *p_hwfn) 7837 { 7838 __qlnx_vf_flr_update(p_hwfn); 7839 7840 return; 7841 } 7842 7843 #ifndef QLNX_VF 7844 7845 static void 7846 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) 7847 { 7848 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7849 int i; 7850 7851 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7852 ha, p_hwfn->p_dev, p_hwfn); 7853 7854 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7855 return; 7856 7857 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", 7858 ha, p_hwfn->p_dev, p_hwfn, i); 7859 7860 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7861 atomic_testandset_32(&ha->sriov_task[i].flags, 7862 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); 7863 7864 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7865 &ha->sriov_task[i].pf_task); 7866 } 7867 } 7868 7869 static void 7870 qlnx_initialize_sriov(qlnx_host_t *ha) 7871 { 7872 device_t dev; 7873 nvlist_t *pf_schema, *vf_schema; 7874 int iov_error; 7875 7876 dev = ha->pci_dev; 7877 7878 pf_schema = pci_iov_schema_alloc_node(); 7879 vf_schema = pci_iov_schema_alloc_node(); 7880 7881 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 7882 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 7883 IOV_SCHEMA_HASDEFAULT, FALSE); 7884 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 7885 IOV_SCHEMA_HASDEFAULT, FALSE); 7886 pci_iov_schema_add_uint16(vf_schema, "num-queues", 7887 IOV_SCHEMA_HASDEFAULT, 1); 7888 7889 iov_error = pci_iov_attach(dev, pf_schema, 
vf_schema); 7890 7891 if (iov_error != 0) { 7892 ha->sriov_initialized = 0; 7893 } else { 7894 device_printf(dev, "SRIOV initialized\n"); 7895 ha->sriov_initialized = 1; 7896 } 7897 7898 return; 7899 } 7900 7901 static void 7902 qlnx_sriov_disable(qlnx_host_t *ha) 7903 { 7904 struct ecore_dev *cdev; 7905 int i, j; 7906 7907 cdev = &ha->cdev; 7908 7909 ecore_iov_set_vfs_to_disable(cdev, true); 7910 7911 for_each_hwfn(cdev, i) { 7912 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 7913 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 7914 7915 if (!ptt) { 7916 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 7917 return; 7918 } 7919 /* Clean WFQ db and configure equal weight for all vports */ 7920 ecore_clean_wfq_db(hwfn, ptt); 7921 7922 ecore_for_each_vf(hwfn, j) { 7923 int k = 0; 7924 7925 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 7926 continue; 7927 7928 if (ecore_iov_is_vf_started(hwfn, j)) { 7929 /* Wait until VF is disabled before releasing */ 7930 7931 for (k = 0; k < 100; k++) { 7932 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 7933 qlnx_mdelay(__func__, 10); 7934 } else 7935 break; 7936 } 7937 } 7938 7939 if (k < 100) 7940 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 7941 ptt, j); 7942 else { 7943 QL_DPRINT1(ha, 7944 "Timeout waiting for VF's FLR to end\n"); 7945 } 7946 } 7947 ecore_ptt_release(hwfn, ptt); 7948 } 7949 7950 ecore_iov_set_vfs_to_disable(cdev, false); 7951 7952 return; 7953 } 7954 7955 static void 7956 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 7957 struct ecore_iov_vf_init_params *params) 7958 { 7959 u16 base, i; 7960 7961 /* Since we have an equal resource distribution per-VF, and we assume 7962 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 7963 * sequentially from there. 7964 */ 7965 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; 7966 7967 params->rel_vf_id = vfid; 7968 7969 for (i = 0; i < params->num_queues; i++) { 7970 params->req_rx_queue[i] = base + i; 7971 params->req_tx_queue[i] = base + i; 7972 } 7973 7974 /* PF uses indices 0 for itself; Set vport/RSS afterwards */ 7975 params->vport_id = vfid + 1; 7976 params->rss_eng_id = vfid + 1; 7977 7978 return; 7979 } 7980 7981 static int 7982 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) 7983 { 7984 qlnx_host_t *ha; 7985 struct ecore_dev *cdev; 7986 struct ecore_iov_vf_init_params params; 7987 int ret, j, i; 7988 uint32_t max_vfs; 7989 7990 if ((ha = device_get_softc(dev)) == NULL) { 7991 device_printf(dev, "%s: cannot get softc\n", __func__); 7992 return (-1); 7993 } 7994 7995 if (qlnx_create_pf_taskqueues(ha) != 0) 7996 goto qlnx_iov_init_err0; 7997 7998 cdev = &ha->cdev; 7999 8000 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); 8001 8002 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", 8003 dev, num_vfs, max_vfs); 8004 8005 if (num_vfs >= max_vfs) { 8006 QL_DPRINT1(ha, "Can start at most %d VFs\n", 8007 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); 8008 goto qlnx_iov_init_err0; 8009 } 8010 8011 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, 8012 M_NOWAIT); 8013 8014 if (ha->vf_attr == NULL) 8015 goto qlnx_iov_init_err0; 8016 8017 memset(&params, 0, sizeof(params)); 8018 8019 /* Initialize HW for VF access */ 8020 for_each_hwfn(cdev, j) { 8021 struct ecore_hwfn *hwfn = &cdev->hwfns[j]; 8022 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8023 8024 /* Make sure not to use more than 16 queues per VF */ 8025 params.num_queues = min_t(int, 8026 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 8027
16); 8028 8029 if (!ptt) { 8030 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8031 goto qlnx_iov_init_err1; 8032 } 8033 8034 for (i = 0; i < num_vfs; i++) { 8035 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) 8036 continue; 8037 8038 qlnx_sriov_enable_qid_config(hwfn, i, &params); 8039 8040 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params); 8041 8042 if (ret) { 8043 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); 8044 ecore_ptt_release(hwfn, ptt); 8045 goto qlnx_iov_init_err1; 8046 } 8047 } 8048 8049 ecore_ptt_release(hwfn, ptt); 8050 } 8051 8052 ha->num_vfs = num_vfs; 8053 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); 8054 8055 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs); 8056 8057 return (0); 8058 8059 qlnx_iov_init_err1: 8060 qlnx_sriov_disable(ha); 8061 8062 qlnx_iov_init_err0: 8063 qlnx_destroy_pf_taskqueues(ha); 8064 ha->num_vfs = 0; 8065 8066 return (-1); 8067 } 8068 8069 static void 8070 qlnx_iov_uninit(device_t dev) 8071 { 8072 qlnx_host_t *ha; 8073 8074 if ((ha = device_get_softc(dev)) == NULL) { 8075 device_printf(dev, "%s: cannot get softc\n", __func__); 8076 return; 8077 } 8078 8079 QL_DPRINT2(ha," dev = %p enter\n", dev); 8080 8081 qlnx_sriov_disable(ha); 8082 qlnx_destroy_pf_taskqueues(ha); 8083 8084 free(ha->vf_attr, M_QLNXBUF); 8085 ha->vf_attr = NULL; 8086 8087 ha->num_vfs = 0; 8088 8089 QL_DPRINT2(ha," dev = %p exit\n", dev); 8090 return; 8091 } 8092 8093 static int 8094 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 8095 { 8096 qlnx_host_t *ha; 8097 qlnx_vf_attr_t *vf_attr; 8098 unsigned const char *mac; 8099 size_t size; 8100 struct ecore_hwfn *p_hwfn; 8101 8102 if ((ha = device_get_softc(dev)) == NULL) { 8103 device_printf(dev, "%s: cannot get softc\n", __func__); 8104 return (-1); 8105 } 8106 8107 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8108 8109 if (vfnum > (ha->num_vfs - 1)) { 8110 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8111 vfnum, (ha->num_vfs - 1)); 8112 } 8113 8114 vf_attr = &ha->vf_attr[vfnum]; 8115 8116 if (nvlist_exists_binary(params, "mac-addr")) { 8117 mac = nvlist_get_binary(params, "mac-addr", &size); 8118 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8119 device_printf(dev, 8120 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8121 __func__, vf_attr->mac_addr[0], 8122 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8123 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8124 vf_attr->mac_addr[5]); 8125 p_hwfn = &ha->cdev.hwfns[0]; 8126 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8127 vfnum); 8128 } 8129 8130 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8131 return (0); 8132 } 8133 8134 static void 8135 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8136 { 8137 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8138 struct ecore_ptt *ptt; 8139 int i; 8140 8141 ptt = ecore_ptt_acquire(p_hwfn); 8142 if (!ptt) { 8143 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8144 __qlnx_pf_vf_msg(p_hwfn, 0); 8145 return; 8146 } 8147 8148 ecore_iov_pf_get_pending_events(p_hwfn, events); 8149 8150 QL_DPRINT2(ha, "Event mask of VF events:" 8151 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8152 events[0], events[1], events[2]); 8153 8154 ecore_for_each_vf(p_hwfn, i) { 8155 /* Skip VFs with no pending messages */ 8156 if (!(events[i / 64] & (1ULL << (i % 64)))) 8157 continue; 8158 8159 QL_DPRINT2(ha, 8160 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8161 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8162 8163 /* Copy VF's message to PF's request buffer for
that VF */ 8164 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8165 continue; 8166 8167 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8168 } 8169 8170 ecore_ptt_release(p_hwfn, ptt); 8171 8172 return; 8173 } 8174 8175 static void 8176 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8177 { 8178 struct ecore_ptt *ptt; 8179 int ret; 8180 8181 ptt = ecore_ptt_acquire(p_hwfn); 8182 8183 if (!ptt) { 8184 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8185 __qlnx_vf_flr_update(p_hwfn); 8186 return; 8187 } 8188 8189 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8190 8191 if (ret) { 8192 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8193 } 8194 8195 ecore_ptt_release(p_hwfn, ptt); 8196 8197 return; 8198 } 8199 8200 static void 8201 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8202 { 8203 struct ecore_ptt *ptt; 8204 int i; 8205 8206 ptt = ecore_ptt_acquire(p_hwfn); 8207 8208 if (!ptt) { 8209 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8210 qlnx_vf_bulleting_update(p_hwfn); 8211 return; 8212 } 8213 8214 ecore_for_each_vf(p_hwfn, i) { 8215 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8216 p_hwfn, i); 8217 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8218 } 8219 8220 ecore_ptt_release(p_hwfn, ptt); 8221 8222 return; 8223 } 8224 8225 static void 8226 qlnx_pf_taskqueue(void *context, int pending) 8227 { 8228 struct ecore_hwfn *p_hwfn; 8229 qlnx_host_t *ha; 8230 int i; 8231 8232 p_hwfn = context; 8233 8234 if (p_hwfn == NULL) 8235 return; 8236 8237 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8238 8239 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8240 return; 8241 8242 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8243 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) 8244 qlnx_handle_vf_msg(ha, p_hwfn); 8245 8246 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8247 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) 8248 qlnx_handle_vf_flr_update(ha, p_hwfn); 8249 8250 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8251 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) 8252 qlnx_handle_bulletin_update(ha, p_hwfn); 8253 8254 return; 8255 } 8256 8257 static int 8258 qlnx_create_pf_taskqueues(qlnx_host_t *ha) 8259 { 8260 int i; 8261 uint8_t tq_name[32]; 8262 8263 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8264 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 8265 8266 bzero(tq_name, sizeof (tq_name)); 8267 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); 8268 8269 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); 8270 8271 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 8272 taskqueue_thread_enqueue, 8273 &ha->sriov_task[i].pf_taskqueue); 8274 8275 if (ha->sriov_task[i].pf_taskqueue == NULL) 8276 return (-1); 8277 8278 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, 8279 PI_NET, "%s", tq_name); 8280 8281 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); 8282 } 8283 8284 return (0); 8285 } 8286 8287 static void 8288 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) 8289 { 8290 int i; 8291 8292 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8293 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8294 taskqueue_drain(ha->sriov_task[i].pf_taskqueue, 8295 &ha->sriov_task[i].pf_task); 8296 taskqueue_free(ha->sriov_task[i].pf_taskqueue); 8297 ha->sriov_task[i].pf_taskqueue = NULL; 8298 } 8299 } 8300 return; 8301 } 8302 8303 static void 8304 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) 8305 { 8306 struct ecore_mcp_link_capabilities caps; 8307 struct ecore_mcp_link_params params; 8308 
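/*
 * qlnx_inform_vf_link_state() snapshots the PF's current link parameters,
 * state and capabilities and publishes them to the bulletin of every
 * possible VF; when the PF link is up on a CMT (dual-hwfn) device the
 * advertised speed is forced to 100G.
 */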
struct ecore_mcp_link_state link; 8309 int i; 8310 8311 if (!p_hwfn->pf_iov_info) 8312 return; 8313 8314 memset(&params, 0, sizeof(struct ecore_mcp_link_params)); 8315 memset(&link, 0, sizeof(struct ecore_mcp_link_state)); 8316 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); 8317 8318 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 8319 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); 8320 memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); 8321 8322 QL_DPRINT2(ha, "called\n"); 8323 8324 /* Update bulletin of all future possible VFs with link configuration */ 8325 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 8326 /* Modify link according to the VF's configured link state */ 8327 8328 link.link_up = false; 8329 8330 if (ha->link_up) { 8331 link.link_up = true; 8332 /* Set speed according to maximum supported by HW. 8333 * that is 40G for regular devices and 100G for CMT 8334 * mode devices. 8335 */ 8336 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 8337 100000 : link.speed; 8338 } 8339 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); 8340 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps); 8341 } 8342 8343 qlnx_vf_bulleting_update(p_hwfn); 8344 8345 return; 8346 } 8347 #endif /* #ifndef QLNX_VF */ 8348 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 8349