1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: qlnx_os.c 30 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "qlnx_os.h" 37 #include "bcm_osal.h" 38 #include "reg_addr.h" 39 #include "ecore_gtt_reg_addr.h" 40 #include "ecore.h" 41 #include "ecore_chain.h" 42 #include "ecore_status.h" 43 #include "ecore_hw.h" 44 #include "ecore_rt_defs.h" 45 #include "ecore_init_ops.h" 46 #include "ecore_int.h" 47 #include "ecore_cxt.h" 48 #include "ecore_spq.h" 49 #include "ecore_init_fw_funcs.h" 50 #include "ecore_sp_commands.h" 51 #include "ecore_dev_api.h" 52 #include "ecore_l2_api.h" 53 #include "ecore_mcp.h" 54 #include "ecore_hw_defs.h" 55 #include "mcp_public.h" 56 #include "ecore_iro.h" 57 #include "nvm_cfg.h" 58 #include "ecore_dbg_fw_funcs.h" 59 #include "ecore_iov_api.h" 60 #include "ecore_vf_api.h" 61 62 #include "qlnx_ioctl.h" 63 #include "qlnx_def.h" 64 #include "qlnx_ver.h" 65 66 #ifdef QLNX_ENABLE_IWARP 67 #include "qlnx_rdma.h" 68 #endif /* #ifdef QLNX_ENABLE_IWARP */ 69 70 #ifdef CONFIG_ECORE_SRIOV 71 #include <sys/nv.h> 72 #include <sys/iov_schema.h> 73 #include <dev/pci/pci_iov.h> 74 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 75 76 #include <sys/smp.h> 77 78 /* 79 * static functions 80 */ 81 /* 82 * ioctl related functions 83 */ 84 static void qlnx_add_sysctls(qlnx_host_t *ha); 85 86 /* 87 * main driver 88 */ 89 static void qlnx_release(qlnx_host_t *ha); 90 static void qlnx_fp_isr(void *arg); 91 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 92 static void qlnx_init(void *arg); 93 static void qlnx_init_locked(qlnx_host_t *ha); 94 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 95 static int qlnx_set_promisc(qlnx_host_t *ha); 96 static int qlnx_set_allmulti(qlnx_host_t *ha); 97 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data); 98 static int qlnx_media_change(if_t ifp); 99 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr); 100 static void qlnx_stop(qlnx_host_t *ha); 101 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 102 struct mbuf **m_headp); 103 static int 
qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 104 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 105 struct qlnx_link_output *if_link); 106 static int qlnx_transmit(if_t ifp, struct mbuf *mp); 107 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, 108 struct mbuf *mp); 109 static void qlnx_qflush(if_t ifp); 110 111 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 112 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 113 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 114 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 115 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 116 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 117 118 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 119 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 120 121 static int qlnx_nic_setup(struct ecore_dev *cdev, 122 struct ecore_pf_params *func_params); 123 static int qlnx_nic_start(struct ecore_dev *cdev); 124 static int qlnx_slowpath_start(qlnx_host_t *ha); 125 static int qlnx_slowpath_stop(qlnx_host_t *ha); 126 static int qlnx_init_hw(qlnx_host_t *ha); 127 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 128 char ver_str[VER_SIZE]); 129 static void qlnx_unload(qlnx_host_t *ha); 130 static int qlnx_load(qlnx_host_t *ha); 131 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 132 uint32_t add_mac); 133 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 134 uint32_t len); 135 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 136 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 137 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 138 struct qlnx_rx_queue *rxq); 139 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 140 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 141 int hwfn_index); 142 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 143 int hwfn_index); 144 static void qlnx_timer(void *arg); 145 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 146 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 147 static void qlnx_trigger_dump(qlnx_host_t *ha); 148 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 149 struct qlnx_tx_queue *txq); 150 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 151 struct qlnx_tx_queue *txq); 152 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 153 int lro_enable); 154 static void qlnx_fp_taskqueue(void *context, int pending); 155 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 156 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 157 struct qlnx_agg_info *tpa); 158 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 159 160 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 161 162 /* 163 * Hooks to the Operating Systems 164 */ 165 static int qlnx_pci_probe (device_t); 166 static int qlnx_pci_attach (device_t); 167 static int qlnx_pci_detach (device_t); 168 169 #ifndef QLNX_VF 170 171 #ifdef CONFIG_ECORE_SRIOV 172 173 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 174 static void qlnx_iov_uninit(device_t dev); 175 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 176 static void qlnx_initialize_sriov(qlnx_host_t *ha); 177 static void qlnx_pf_taskqueue(void *context, int pending); 178 static int 
qlnx_create_pf_taskqueues(qlnx_host_t *ha); 179 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha); 180 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha); 181 182 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 183 184 static device_method_t qlnx_pci_methods[] = { 185 /* Device interface */ 186 DEVMETHOD(device_probe, qlnx_pci_probe), 187 DEVMETHOD(device_attach, qlnx_pci_attach), 188 DEVMETHOD(device_detach, qlnx_pci_detach), 189 190 #ifdef CONFIG_ECORE_SRIOV 191 DEVMETHOD(pci_iov_init, qlnx_iov_init), 192 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit), 193 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf), 194 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 195 { 0, 0 } 196 }; 197 198 static driver_t qlnx_pci_driver = { 199 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 200 }; 201 202 MODULE_VERSION(if_qlnxe,1); 203 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0); 204 205 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 206 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 207 208 #else 209 210 static device_method_t qlnxv_pci_methods[] = { 211 /* Device interface */ 212 DEVMETHOD(device_probe, qlnx_pci_probe), 213 DEVMETHOD(device_attach, qlnx_pci_attach), 214 DEVMETHOD(device_detach, qlnx_pci_detach), 215 { 0, 0 } 216 }; 217 218 static driver_t qlnxv_pci_driver = { 219 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t), 220 }; 221 222 MODULE_VERSION(if_qlnxev,1); 223 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0); 224 225 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1); 226 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1); 227 228 #endif /* #ifdef QLNX_VF */ 229 230 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 231 232 char qlnx_dev_str[128]; 233 char qlnx_ver_str[VER_SIZE]; 234 char qlnx_name_str[NAME_SIZE]; 235 236 /* 237 * Some PCI Configuration Space Related Defines 238 */ 239 240 #ifndef PCI_VENDOR_QLOGIC 241 #define PCI_VENDOR_QLOGIC 0x1077 242 #endif 243 244 /* 40G Adapter QLE45xxx*/ 245 #ifndef QLOGIC_PCI_DEVICE_ID_1634 246 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 247 #endif 248 249 /* 100G Adapter QLE45xxx*/ 250 #ifndef QLOGIC_PCI_DEVICE_ID_1644 251 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 252 #endif 253 254 /* 25G Adapter QLE45xxx*/ 255 #ifndef QLOGIC_PCI_DEVICE_ID_1656 256 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 257 #endif 258 259 /* 50G Adapter QLE45xxx*/ 260 #ifndef QLOGIC_PCI_DEVICE_ID_1654 261 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 262 #endif 263 264 /* 10G/25G/40G Adapter QLE41xxx*/ 265 #ifndef QLOGIC_PCI_DEVICE_ID_8070 266 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 267 #endif 268 269 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/ 270 #ifndef QLOGIC_PCI_DEVICE_ID_8090 271 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090 272 #endif 273 274 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 275 "qlnxe driver parameters"); 276 277 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ 278 static int qlnxe_queue_count = QLNX_DEFAULT_RSS; 279 280 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 281 &qlnxe_queue_count, 0, "Multi-Queue queue count"); 282 283 /* 284 * Note on RDMA personality setting 285 * 286 * Read the personality configured in NVRAM. 287 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and 288 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT, 289 * use the personality in NVRAM. 290 * 291 * Otherwise use the personality configured in sysctl.
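 * For example, the default hw.qlnxe.rdma_configuration value of 0x22222222
 * assigns ETH_IWARP (0x2) to each of the first eight PCI functions, while a
 * nibble of 0x0 leaves that function on its NVRAM-configured personality.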
292 * 293 */ 294 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */ 295 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */ 296 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */ 297 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */ 298 #define QLNX_PERSONALITY_BITS_PER_FUNC 4 299 #define QLNX_PERSONALIY_MASK 0xF 300 301 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/ 302 static uint64_t qlnxe_rdma_configuration = 0x22222222; 303 304 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 305 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 306 307 int 308 qlnx_vf_device(qlnx_host_t *ha) 309 { 310 uint16_t device_id; 311 312 device_id = ha->device_id; 313 314 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 315 return 0; 316 317 return -1; 318 } 319 320 static int 321 qlnx_valid_device(qlnx_host_t *ha) 322 { 323 uint16_t device_id; 324 325 device_id = ha->device_id; 326 327 #ifndef QLNX_VF 328 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 329 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 330 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 331 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 332 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 333 return 0; 334 #else 335 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 336 return 0; 337 338 #endif /* #ifndef QLNX_VF */ 339 return -1; 340 } 341 342 #ifdef QLNX_ENABLE_IWARP 343 static int 344 qlnx_rdma_supported(struct qlnx_host *ha) 345 { 346 uint16_t device_id; 347 348 device_id = pci_get_device(ha->pci_dev); 349 350 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 351 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 352 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 353 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 354 return (0); 355 356 return (-1); 357 } 358 #endif /* #ifdef QLNX_ENABLE_IWARP */ 359 360 /* 361 * Name: qlnx_pci_probe 362 * Function: Validate the PCI device to be a QLA80XX device 363 */ 364 static int 365 qlnx_pci_probe(device_t dev) 366 { 367 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 368 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 369 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 370 371 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 372 return (ENXIO); 373 } 374 375 switch (pci_get_device(dev)) { 376 #ifndef QLNX_VF 377 378 case QLOGIC_PCI_DEVICE_ID_1644: 379 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 380 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 381 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 382 QLNX_VERSION_BUILD); 383 device_set_desc_copy(dev, qlnx_dev_str); 384 385 break; 386 387 case QLOGIC_PCI_DEVICE_ID_1634: 388 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 389 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 390 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 391 QLNX_VERSION_BUILD); 392 device_set_desc_copy(dev, qlnx_dev_str); 393 394 break; 395 396 case QLOGIC_PCI_DEVICE_ID_1656: 397 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 398 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 399 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 400 QLNX_VERSION_BUILD); 401 device_set_desc_copy(dev, qlnx_dev_str); 402 403 break; 404 405 case QLOGIC_PCI_DEVICE_ID_1654: 406 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 407 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 408 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 409 QLNX_VERSION_BUILD); 410 device_set_desc_copy(dev, qlnx_dev_str); 411 412 break; 413 414 case QLOGIC_PCI_DEVICE_ID_8070: 415 
snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 416 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 417 " Adapter-Ethernet Function", 418 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 419 QLNX_VERSION_BUILD); 420 device_set_desc_copy(dev, qlnx_dev_str); 421 422 break; 423 424 #else 425 case QLOGIC_PCI_DEVICE_ID_8090: 426 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 427 "Qlogic SRIOV PCI CNA (AH) " 428 "Adapter-Ethernet Function", 429 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 430 QLNX_VERSION_BUILD); 431 device_set_desc_copy(dev, qlnx_dev_str); 432 433 break; 434 435 #endif /* #ifndef QLNX_VF */ 436 437 default: 438 return (ENXIO); 439 } 440 441 #ifdef QLNX_ENABLE_IWARP 442 qlnx_rdma_init(); 443 #endif /* #ifdef QLNX_ENABLE_IWARP */ 444 445 return (BUS_PROBE_DEFAULT); 446 } 447 448 static uint16_t 449 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 450 struct qlnx_tx_queue *txq) 451 { 452 u16 hw_bd_cons; 453 u16 ecore_cons_idx; 454 455 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 456 457 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 458 459 return (hw_bd_cons - ecore_cons_idx); 460 } 461 462 static void 463 qlnx_sp_intr(void *arg) 464 { 465 struct ecore_hwfn *p_hwfn; 466 qlnx_host_t *ha; 467 int i; 468 469 p_hwfn = arg; 470 471 if (p_hwfn == NULL) { 472 printf("%s: spurious slowpath intr\n", __func__); 473 return; 474 } 475 476 ha = (qlnx_host_t *)p_hwfn->p_dev; 477 478 QL_DPRINT2(ha, "enter\n"); 479 480 for (i = 0; i < ha->cdev.num_hwfns; i++) { 481 if (&ha->cdev.hwfns[i] == p_hwfn) { 482 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 483 break; 484 } 485 } 486 QL_DPRINT2(ha, "exit\n"); 487 488 return; 489 } 490 491 static void 492 qlnx_sp_taskqueue(void *context, int pending) 493 { 494 struct ecore_hwfn *p_hwfn; 495 496 p_hwfn = context; 497 498 if (p_hwfn != NULL) { 499 qlnx_sp_isr(p_hwfn); 500 } 501 return; 502 } 503 504 static int 505 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 506 { 507 int i; 508 uint8_t tq_name[32]; 509 510 for (i = 0; i < ha->cdev.num_hwfns; i++) { 511 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 512 513 bzero(tq_name, sizeof (tq_name)); 514 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 515 516 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 517 518 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 519 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 520 521 if (ha->sp_taskqueue[i] == NULL) 522 return (-1); 523 524 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 525 tq_name); 526 527 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 528 } 529 530 return (0); 531 } 532 533 static void 534 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 535 { 536 int i; 537 538 for (i = 0; i < ha->cdev.num_hwfns; i++) { 539 if (ha->sp_taskqueue[i] != NULL) { 540 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 541 taskqueue_free(ha->sp_taskqueue[i]); 542 } 543 } 544 return; 545 } 546 547 static void 548 qlnx_fp_taskqueue(void *context, int pending) 549 { 550 struct qlnx_fastpath *fp; 551 qlnx_host_t *ha; 552 if_t ifp; 553 554 fp = context; 555 556 if (fp == NULL) 557 return; 558 559 ha = (qlnx_host_t *)fp->edev; 560 561 ifp = ha->ifp; 562 563 if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 564 if (!drbr_empty(ifp, fp->tx_br)) { 565 if(mtx_trylock(&fp->tx_mtx)) { 566 #ifdef QLNX_TRACE_PERF_DATA 567 tx_pkts = fp->tx_pkts_transmitted; 568 tx_compl = fp->tx_pkts_completed; 569 #endif 570 571 qlnx_transmit_locked(ifp, fp, NULL); 572 573 #ifdef QLNX_TRACE_PERF_DATA 574 fp->tx_pkts_trans_fp += 575 
(fp->tx_pkts_transmitted - tx_pkts); 576 fp->tx_pkts_compl_fp += 577 (fp->tx_pkts_completed - tx_compl); 578 #endif 579 mtx_unlock(&fp->tx_mtx); 580 } 581 } 582 } 583 584 QL_DPRINT2(ha, "exit \n"); 585 return; 586 } 587 588 static int 589 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 590 { 591 int i; 592 uint8_t tq_name[32]; 593 struct qlnx_fastpath *fp; 594 595 for (i = 0; i < ha->num_rss; i++) { 596 fp = &ha->fp_array[i]; 597 598 bzero(tq_name, sizeof (tq_name)); 599 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 600 601 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 602 603 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 604 taskqueue_thread_enqueue, 605 &fp->fp_taskqueue); 606 607 if (fp->fp_taskqueue == NULL) 608 return (-1); 609 610 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 611 tq_name); 612 613 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 614 } 615 616 return (0); 617 } 618 619 static void 620 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 621 { 622 int i; 623 struct qlnx_fastpath *fp; 624 625 for (i = 0; i < ha->num_rss; i++) { 626 fp = &ha->fp_array[i]; 627 628 if (fp->fp_taskqueue != NULL) { 629 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 630 taskqueue_free(fp->fp_taskqueue); 631 fp->fp_taskqueue = NULL; 632 } 633 } 634 return; 635 } 636 637 static void 638 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 639 { 640 int i; 641 struct qlnx_fastpath *fp; 642 643 for (i = 0; i < ha->num_rss; i++) { 644 fp = &ha->fp_array[i]; 645 646 if (fp->fp_taskqueue != NULL) { 647 QLNX_UNLOCK(ha); 648 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 649 QLNX_LOCK(ha); 650 } 651 } 652 return; 653 } 654 655 static void 656 qlnx_get_params(qlnx_host_t *ha) 657 { 658 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 659 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 660 qlnxe_queue_count); 661 qlnxe_queue_count = 0; 662 } 663 return; 664 } 665 666 static void 667 qlnx_error_recovery_taskqueue(void *context, int pending) 668 { 669 qlnx_host_t *ha; 670 671 ha = context; 672 673 QL_DPRINT2(ha, "enter\n"); 674 675 QLNX_LOCK(ha); 676 qlnx_stop(ha); 677 QLNX_UNLOCK(ha); 678 679 #ifdef QLNX_ENABLE_IWARP 680 qlnx_rdma_dev_remove(ha); 681 #endif /* #ifdef QLNX_ENABLE_IWARP */ 682 683 qlnx_slowpath_stop(ha); 684 qlnx_slowpath_start(ha); 685 686 #ifdef QLNX_ENABLE_IWARP 687 qlnx_rdma_dev_add(ha); 688 #endif /* #ifdef QLNX_ENABLE_IWARP */ 689 690 qlnx_init(ha); 691 692 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 693 694 QL_DPRINT2(ha, "exit\n"); 695 696 return; 697 } 698 699 static int 700 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 701 { 702 uint8_t tq_name[32]; 703 704 bzero(tq_name, sizeof (tq_name)); 705 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 706 707 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 708 709 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 710 taskqueue_thread_enqueue, &ha->err_taskqueue); 711 712 if (ha->err_taskqueue == NULL) 713 return (-1); 714 715 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 716 717 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 718 719 return (0); 720 } 721 722 static void 723 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 724 { 725 if (ha->err_taskqueue != NULL) { 726 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 727 taskqueue_free(ha->err_taskqueue); 728 } 729 730 ha->err_taskqueue = NULL; 731 732 return; 733 } 734 735 /* 736 * Name: qlnx_pci_attach 737 * Function: attaches the device to the operating system 738 */ 739 
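/*
 * Attach overview: map BAR0 (device registers), BAR2 (doorbells) and
 * BAR4 (MSI-X table), allocate the parent/tx/rx DMA tags, prepare the
 * ecore device, size the RSS queue set against the available MSI-X
 * vectors, wire up the slowpath and fastpath interrupts and taskqueues,
 * start the slowpath, and finally create the ifnet, the ioctl cdev and
 * the sysctl nodes.
 */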
static int 740 qlnx_pci_attach(device_t dev) 741 { 742 qlnx_host_t *ha = NULL; 743 uint32_t rsrc_len_reg __unused = 0; 744 uint32_t rsrc_len_dbells = 0; 745 uint32_t rsrc_len_msix __unused = 0; 746 int i; 747 uint32_t mfw_ver; 748 uint32_t num_sp_msix = 0; 749 uint32_t num_rdma_irqs = 0; 750 751 if ((ha = device_get_softc(dev)) == NULL) { 752 device_printf(dev, "cannot get softc\n"); 753 return (ENOMEM); 754 } 755 756 memset(ha, 0, sizeof (qlnx_host_t)); 757 758 ha->device_id = pci_get_device(dev); 759 760 if (qlnx_valid_device(ha) != 0) { 761 device_printf(dev, "device is not valid device\n"); 762 return (ENXIO); 763 } 764 ha->pci_func = pci_get_function(dev); 765 766 ha->pci_dev = dev; 767 768 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 769 770 ha->flags.lock_init = 1; 771 772 pci_enable_busmaster(dev); 773 774 /* 775 * map the PCI BARs 776 */ 777 778 ha->reg_rid = PCIR_BAR(0); 779 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 780 RF_ACTIVE); 781 782 if (ha->pci_reg == NULL) { 783 device_printf(dev, "unable to map BAR0\n"); 784 goto qlnx_pci_attach_err; 785 } 786 787 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 788 ha->reg_rid); 789 790 ha->dbells_rid = PCIR_BAR(2); 791 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, 792 SYS_RES_MEMORY, 793 ha->dbells_rid); 794 if (rsrc_len_dbells) { 795 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 796 &ha->dbells_rid, RF_ACTIVE); 797 798 if (ha->pci_dbells == NULL) { 799 device_printf(dev, "unable to map BAR1\n"); 800 goto qlnx_pci_attach_err; 801 } 802 ha->dbells_phys_addr = (uint64_t) 803 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 804 805 ha->dbells_size = rsrc_len_dbells; 806 } else { 807 if (qlnx_vf_device(ha) != 0) { 808 device_printf(dev, " BAR1 size is zero\n"); 809 goto qlnx_pci_attach_err; 810 } 811 } 812 813 ha->msix_rid = PCIR_BAR(4); 814 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 815 &ha->msix_rid, RF_ACTIVE); 816 817 if (ha->msix_bar == NULL) { 818 device_printf(dev, "unable to map BAR2\n"); 819 goto qlnx_pci_attach_err; 820 } 821 822 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 823 ha->msix_rid); 824 825 ha->dbg_level = 0x0000; 826 827 QL_DPRINT1(ha, "\n\t\t\t" 828 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 829 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 830 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 831 " msix_avail = 0x%x " 832 "\n\t\t\t[ncpus = %d]\n", 833 ha->pci_dev, ha->pci_reg, rsrc_len_reg, 834 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 835 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 836 mp_ncpus); 837 /* 838 * allocate dma tags 839 */ 840 841 if (qlnx_alloc_parent_dma_tag(ha)) 842 goto qlnx_pci_attach_err; 843 844 if (qlnx_alloc_tx_dma_tag(ha)) 845 goto qlnx_pci_attach_err; 846 847 if (qlnx_alloc_rx_dma_tag(ha)) 848 goto qlnx_pci_attach_err; 849 850 851 if (qlnx_init_hw(ha) != 0) 852 goto qlnx_pci_attach_err; 853 854 ha->flags.hw_init = 1; 855 856 qlnx_get_params(ha); 857 858 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && 859 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { 860 qlnxe_queue_count = QLNX_MAX_RSS; 861 } 862 863 /* 864 * Allocate MSI-x vectors 865 */ 866 if (qlnx_vf_device(ha) != 0) { 867 if (qlnxe_queue_count == 0) 868 ha->num_rss = QLNX_DEFAULT_RSS; 869 else 870 ha->num_rss = qlnxe_queue_count; 871 872 num_sp_msix = ha->cdev.num_hwfns; 873 } else { 874 uint8_t 
max_rxq; 875 uint8_t max_txq; 876 877 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 878 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 879 880 if (max_rxq < max_txq) 881 ha->num_rss = max_rxq; 882 else 883 ha->num_rss = max_txq; 884 885 if (ha->num_rss > QLNX_MAX_VF_RSS) 886 ha->num_rss = QLNX_MAX_VF_RSS; 887 888 num_sp_msix = 0; 889 } 890 891 if (ha->num_rss > mp_ncpus) 892 ha->num_rss = mp_ncpus; 893 894 ha->num_tc = QLNX_MAX_TC; 895 896 ha->msix_count = pci_msix_count(dev); 897 898 #ifdef QLNX_ENABLE_IWARP 899 900 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 901 902 #endif /* #ifdef QLNX_ENABLE_IWARP */ 903 904 if (!ha->msix_count || 905 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 906 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 907 ha->msix_count); 908 goto qlnx_pci_attach_err; 909 } 910 911 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 912 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 913 else 914 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 915 916 QL_DPRINT1(ha, "\n\t\t\t" 917 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 918 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 919 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 920 " msix_avail = 0x%x msix_alloc = 0x%x" 921 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 922 ha->pci_reg, rsrc_len_reg, 923 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 924 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 925 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 926 927 if (pci_alloc_msix(dev, &ha->msix_count)) { 928 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 929 ha->msix_count); 930 ha->msix_count = 0; 931 goto qlnx_pci_attach_err; 932 } 933 934 /* 935 * Initialize slow path interrupt and task queue 936 */ 937 938 if (num_sp_msix) { 939 if (qlnx_create_sp_taskqueues(ha) != 0) 940 goto qlnx_pci_attach_err; 941 942 for (i = 0; i < ha->cdev.num_hwfns; i++) { 943 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 944 945 ha->sp_irq_rid[i] = i + 1; 946 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 947 &ha->sp_irq_rid[i], 948 (RF_ACTIVE | RF_SHAREABLE)); 949 if (ha->sp_irq[i] == NULL) { 950 device_printf(dev, 951 "could not allocate mbx interrupt\n"); 952 goto qlnx_pci_attach_err; 953 } 954 955 if (bus_setup_intr(dev, ha->sp_irq[i], 956 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 957 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 958 device_printf(dev, 959 "could not setup slow path interrupt\n"); 960 goto qlnx_pci_attach_err; 961 } 962 963 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 964 " sp_irq %p sp_handle %p\n", p_hwfn, 965 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 966 } 967 } 968 969 /* 970 * initialize fast path interrupt 971 */ 972 if (qlnx_create_fp_taskqueues(ha) != 0) 973 goto qlnx_pci_attach_err; 974 975 for (i = 0; i < ha->num_rss; i++) { 976 ha->irq_vec[i].rss_idx = i; 977 ha->irq_vec[i].ha = ha; 978 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; 979 980 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 981 &ha->irq_vec[i].irq_rid, 982 (RF_ACTIVE | RF_SHAREABLE)); 983 984 if (ha->irq_vec[i].irq == NULL) { 985 device_printf(dev, 986 "could not allocate interrupt[%d] irq_rid = %d\n", 987 i, ha->irq_vec[i].irq_rid); 988 goto qlnx_pci_attach_err; 989 } 990 991 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 992 device_printf(dev, "could not allocate tx_br[%d]\n", i); 993 goto qlnx_pci_attach_err; 994 } 995 } 996 997 if (qlnx_vf_device(ha) 
!= 0) { 998 callout_init(&ha->qlnx_callout, 1); 999 ha->flags.callout_init = 1; 1000 1001 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1002 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1003 goto qlnx_pci_attach_err; 1004 if (ha->grcdump_size[i] == 0) 1005 goto qlnx_pci_attach_err; 1006 1007 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1008 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1009 i, ha->grcdump_size[i]); 1010 1011 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1012 if (ha->grcdump[i] == NULL) { 1013 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1014 goto qlnx_pci_attach_err; 1015 } 1016 1017 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1018 goto qlnx_pci_attach_err; 1019 if (ha->idle_chk_size[i] == 0) 1020 goto qlnx_pci_attach_err; 1021 1022 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1023 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1024 i, ha->idle_chk_size[i]); 1025 1026 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1027 1028 if (ha->idle_chk[i] == NULL) { 1029 device_printf(dev, "idle_chk alloc failed\n"); 1030 goto qlnx_pci_attach_err; 1031 } 1032 } 1033 1034 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1035 goto qlnx_pci_attach_err; 1036 } 1037 1038 if (qlnx_slowpath_start(ha) != 0) 1039 goto qlnx_pci_attach_err; 1040 else 1041 ha->flags.slowpath_start = 1; 1042 1043 if (qlnx_vf_device(ha) != 0) { 1044 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1045 qlnx_mdelay(__func__, 1000); 1046 qlnx_trigger_dump(ha); 1047 1048 goto qlnx_pci_attach_err0; 1049 } 1050 1051 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1052 qlnx_mdelay(__func__, 1000); 1053 qlnx_trigger_dump(ha); 1054 1055 goto qlnx_pci_attach_err0; 1056 } 1057 } else { 1058 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1059 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1060 } 1061 1062 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1063 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1064 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1065 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1066 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1067 FW_ENGINEERING_VERSION); 1068 1069 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1070 ha->stormfw_ver, ha->mfw_ver); 1071 1072 qlnx_init_ifnet(dev, ha); 1073 1074 /* 1075 * add sysctls 1076 */ 1077 qlnx_add_sysctls(ha); 1078 1079 qlnx_pci_attach_err0: 1080 /* 1081 * create ioctl device interface 1082 */ 1083 if (qlnx_vf_device(ha) != 0) { 1084 if (qlnx_make_cdev(ha)) { 1085 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1086 goto qlnx_pci_attach_err; 1087 } 1088 1089 #ifdef QLNX_ENABLE_IWARP 1090 qlnx_rdma_dev_add(ha); 1091 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1092 } 1093 1094 #ifndef QLNX_VF 1095 #ifdef CONFIG_ECORE_SRIOV 1096 1097 if (qlnx_vf_device(ha) != 0) 1098 qlnx_initialize_sriov(ha); 1099 1100 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1101 #endif /* #ifdef QLNX_VF */ 1102 1103 QL_DPRINT2(ha, "success\n"); 1104 1105 return (0); 1106 1107 qlnx_pci_attach_err: 1108 1109 qlnx_release(ha); 1110 1111 return (ENXIO); 1112 } 1113 1114 /* 1115 * Name: qlnx_pci_detach 1116 * Function: Unhooks the device from the operating system 1117 */ 1118 static int 1119 qlnx_pci_detach(device_t dev) 1120 { 1121 qlnx_host_t *ha = NULL; 1122 1123 if ((ha = device_get_softc(dev)) == NULL) { 1124 device_printf(dev, "%s: cannot get softc\n", __func__); 1125 return (ENOMEM); 1126 } 1127 1128 if (qlnx_vf_device(ha) != 0) { 1129 #ifdef CONFIG_ECORE_SRIOV 1130 int ret; 
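		/*
		 * pci_iov_detach() must succeed before the PF itself can be
		 * torn down; a non-zero return means SR-IOV is still in use,
		 * so fail the detach.
		 */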
1131 1132 ret = pci_iov_detach(dev); 1133 if (ret) { 1134 device_printf(dev, "%s: SRIOV in use\n", __func__); 1135 return (ret); 1136 } 1137 1138 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1139 1140 #ifdef QLNX_ENABLE_IWARP 1141 if (qlnx_rdma_dev_remove(ha) != 0) 1142 return (EBUSY); 1143 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1144 } 1145 1146 QLNX_LOCK(ha); 1147 qlnx_stop(ha); 1148 QLNX_UNLOCK(ha); 1149 1150 qlnx_release(ha); 1151 1152 return (0); 1153 } 1154 1155 #ifdef QLNX_ENABLE_IWARP 1156 1157 static uint8_t 1158 qlnx_get_personality(uint8_t pci_func) 1159 { 1160 uint8_t personality; 1161 1162 personality = (qlnxe_rdma_configuration >> 1163 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & 1164 QLNX_PERSONALIY_MASK; 1165 return (personality); 1166 } 1167 1168 static void 1169 qlnx_set_personality(qlnx_host_t *ha) 1170 { 1171 uint8_t personality; 1172 1173 personality = qlnx_get_personality(ha->pci_func); 1174 1175 switch (personality) { 1176 case QLNX_PERSONALITY_DEFAULT: 1177 device_printf(ha->pci_dev, "%s: DEFAULT\n", 1178 __func__); 1179 ha->personality = ECORE_PCI_DEFAULT; 1180 break; 1181 1182 case QLNX_PERSONALITY_ETH_ONLY: 1183 device_printf(ha->pci_dev, "%s: ETH_ONLY\n", 1184 __func__); 1185 ha->personality = ECORE_PCI_ETH; 1186 break; 1187 1188 case QLNX_PERSONALITY_ETH_IWARP: 1189 device_printf(ha->pci_dev, "%s: ETH_IWARP\n", 1190 __func__); 1191 ha->personality = ECORE_PCI_ETH_IWARP; 1192 break; 1193 1194 case QLNX_PERSONALITY_ETH_ROCE: 1195 device_printf(ha->pci_dev, "%s: ETH_ROCE\n", 1196 __func__); 1197 ha->personality = ECORE_PCI_ETH_ROCE; 1198 break; 1199 } 1200 1201 return; 1202 } 1203 1204 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1205 1206 static int 1207 qlnx_init_hw(qlnx_host_t *ha) 1208 { 1209 int rval = 0; 1210 struct ecore_hw_prepare_params params; 1211 1212 ecore_init_struct(&ha->cdev); 1213 1214 /* ha->dp_module = ECORE_MSG_PROBE | 1215 ECORE_MSG_INTR | 1216 ECORE_MSG_SP | 1217 ECORE_MSG_LINK | 1218 ECORE_MSG_SPQ | 1219 ECORE_MSG_RDMA; 1220 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 1221 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; 1222 ha->dp_level = ECORE_LEVEL_NOTICE; 1223 //ha->dp_level = ECORE_LEVEL_VERBOSE; 1224 1225 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 1226 1227 ha->cdev.regview = ha->pci_reg; 1228 1229 ha->personality = ECORE_PCI_DEFAULT; 1230 1231 if (qlnx_vf_device(ha) == 0) { 1232 ha->cdev.b_is_vf = true; 1233 1234 if (ha->pci_dbells != NULL) { 1235 ha->cdev.doorbells = ha->pci_dbells; 1236 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1237 ha->cdev.db_size = ha->dbells_size; 1238 } else { 1239 ha->pci_dbells = ha->pci_reg; 1240 } 1241 } else { 1242 ha->cdev.doorbells = ha->pci_dbells; 1243 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1244 ha->cdev.db_size = ha->dbells_size; 1245 1246 #ifdef QLNX_ENABLE_IWARP 1247 1248 if (qlnx_rdma_supported(ha) == 0) 1249 qlnx_set_personality(ha); 1250 1251 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1252 } 1253 QL_DPRINT2(ha, "%s: %s\n", __func__, 1254 (ha->personality == ECORE_PCI_ETH_IWARP ? 
"iwarp": "ethernet")); 1255 1256 bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); 1257 1258 params.personality = ha->personality; 1259 1260 params.drv_resc_alloc = false; 1261 params.chk_reg_fifo = false; 1262 params.initiate_pf_flr = true; 1263 params.epoch = 0; 1264 1265 ecore_hw_prepare(&ha->cdev, ¶ms); 1266 1267 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 1268 1269 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n", 1270 ha, &ha->cdev, &ha->cdev.hwfns[0]); 1271 1272 return (rval); 1273 } 1274 1275 static void 1276 qlnx_release(qlnx_host_t *ha) 1277 { 1278 device_t dev; 1279 int i; 1280 1281 dev = ha->pci_dev; 1282 1283 QL_DPRINT2(ha, "enter\n"); 1284 1285 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 1286 if (ha->idle_chk[i] != NULL) { 1287 free(ha->idle_chk[i], M_QLNXBUF); 1288 ha->idle_chk[i] = NULL; 1289 } 1290 1291 if (ha->grcdump[i] != NULL) { 1292 free(ha->grcdump[i], M_QLNXBUF); 1293 ha->grcdump[i] = NULL; 1294 } 1295 } 1296 1297 if (ha->flags.callout_init) 1298 callout_drain(&ha->qlnx_callout); 1299 1300 if (ha->flags.slowpath_start) { 1301 qlnx_slowpath_stop(ha); 1302 } 1303 1304 if (ha->flags.hw_init) 1305 ecore_hw_remove(&ha->cdev); 1306 1307 qlnx_del_cdev(ha); 1308 1309 if (ha->ifp != NULL) 1310 ether_ifdetach(ha->ifp); 1311 1312 qlnx_free_tx_dma_tag(ha); 1313 1314 qlnx_free_rx_dma_tag(ha); 1315 1316 qlnx_free_parent_dma_tag(ha); 1317 1318 if (qlnx_vf_device(ha) != 0) { 1319 qlnx_destroy_error_recovery_taskqueue(ha); 1320 } 1321 1322 for (i = 0; i < ha->num_rss; i++) { 1323 struct qlnx_fastpath *fp = &ha->fp_array[i]; 1324 1325 if (ha->irq_vec[i].handle) { 1326 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 1327 ha->irq_vec[i].handle); 1328 } 1329 1330 if (ha->irq_vec[i].irq) { 1331 (void)bus_release_resource(dev, SYS_RES_IRQ, 1332 ha->irq_vec[i].irq_rid, 1333 ha->irq_vec[i].irq); 1334 } 1335 1336 qlnx_free_tx_br(ha, fp); 1337 } 1338 qlnx_destroy_fp_taskqueues(ha); 1339 1340 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1341 if (ha->sp_handle[i]) 1342 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1343 ha->sp_handle[i]); 1344 1345 if (ha->sp_irq[i]) 1346 (void) bus_release_resource(dev, SYS_RES_IRQ, 1347 ha->sp_irq_rid[i], ha->sp_irq[i]); 1348 } 1349 1350 qlnx_destroy_sp_taskqueues(ha); 1351 1352 if (ha->msix_count) 1353 pci_release_msi(dev); 1354 1355 if (ha->flags.lock_init) { 1356 mtx_destroy(&ha->hw_lock); 1357 } 1358 1359 if (ha->pci_reg) 1360 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1361 ha->pci_reg); 1362 1363 if (ha->dbells_size && ha->pci_dbells) 1364 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1365 ha->pci_dbells); 1366 1367 if (ha->msix_bar) 1368 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1369 ha->msix_bar); 1370 1371 QL_DPRINT2(ha, "exit\n"); 1372 return; 1373 } 1374 1375 static void 1376 qlnx_trigger_dump(qlnx_host_t *ha) 1377 { 1378 int i; 1379 1380 if (ha->ifp != NULL) 1381 if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 1382 1383 QL_DPRINT2(ha, "enter\n"); 1384 1385 if (qlnx_vf_device(ha) == 0) 1386 return; 1387 1388 ha->error_recovery = 1; 1389 1390 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1391 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1392 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1393 } 1394 1395 QL_DPRINT2(ha, "exit\n"); 1396 1397 return; 1398 } 1399 1400 static int 1401 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1402 { 1403 int err, ret = 0; 1404 qlnx_host_t *ha; 1405 1406 err = sysctl_handle_int(oidp, &ret, 0, req); 1407 1408 if (err || !req->newptr) 1409 
return (err); 1410 1411 if (ret == 1) { 1412 ha = (qlnx_host_t *)arg1; 1413 qlnx_trigger_dump(ha); 1414 } 1415 return (err); 1416 } 1417 1418 static int 1419 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1420 { 1421 int err, i, ret = 0, usecs = 0; 1422 qlnx_host_t *ha; 1423 struct ecore_hwfn *p_hwfn; 1424 struct qlnx_fastpath *fp; 1425 1426 err = sysctl_handle_int(oidp, &usecs, 0, req); 1427 1428 if (err || !req->newptr || !usecs || (usecs > 255)) 1429 return (err); 1430 1431 ha = (qlnx_host_t *)arg1; 1432 1433 if (qlnx_vf_device(ha) == 0) 1434 return (-1); 1435 1436 for (i = 0; i < ha->num_rss; i++) { 1437 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1438 1439 fp = &ha->fp_array[i]; 1440 1441 if (fp->txq[0]->handle != NULL) { 1442 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1443 (uint16_t)usecs, fp->txq[0]->handle); 1444 } 1445 } 1446 1447 if (!ret) 1448 ha->tx_coalesce_usecs = (uint8_t)usecs; 1449 1450 return (err); 1451 } 1452 1453 static int 1454 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1455 { 1456 int err, i, ret = 0, usecs = 0; 1457 qlnx_host_t *ha; 1458 struct ecore_hwfn *p_hwfn; 1459 struct qlnx_fastpath *fp; 1460 1461 err = sysctl_handle_int(oidp, &usecs, 0, req); 1462 1463 if (err || !req->newptr || !usecs || (usecs > 255)) 1464 return (err); 1465 1466 ha = (qlnx_host_t *)arg1; 1467 1468 if (qlnx_vf_device(ha) == 0) 1469 return (-1); 1470 1471 for (i = 0; i < ha->num_rss; i++) { 1472 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1473 1474 fp = &ha->fp_array[i]; 1475 1476 if (fp->rxq->handle != NULL) { 1477 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1478 0, fp->rxq->handle); 1479 } 1480 } 1481 1482 if (!ret) 1483 ha->rx_coalesce_usecs = (uint8_t)usecs; 1484 1485 return (err); 1486 } 1487 1488 static void 1489 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1490 { 1491 struct sysctl_ctx_list *ctx; 1492 struct sysctl_oid_list *children; 1493 struct sysctl_oid *ctx_oid; 1494 1495 ctx = device_get_sysctl_ctx(ha->pci_dev); 1496 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1497 1498 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1499 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); 1500 children = SYSCTL_CHILDREN(ctx_oid); 1501 1502 SYSCTL_ADD_QUAD(ctx, children, 1503 OID_AUTO, "sp_interrupts", 1504 CTLFLAG_RD, &ha->sp_interrupts, 1505 "No. of slowpath interrupts"); 1506 1507 return; 1508 } 1509 1510 static void 1511 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1512 { 1513 struct sysctl_ctx_list *ctx; 1514 struct sysctl_oid_list *children; 1515 struct sysctl_oid_list *node_children; 1516 struct sysctl_oid *ctx_oid; 1517 int i, j; 1518 uint8_t name_str[16]; 1519 1520 ctx = device_get_sysctl_ctx(ha->pci_dev); 1521 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1522 1523 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1524 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); 1525 children = SYSCTL_CHILDREN(ctx_oid); 1526 1527 for (i = 0; i < ha->num_rss; i++) { 1528 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1529 snprintf(name_str, sizeof(name_str), "%d", i); 1530 1531 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1532 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); 1533 node_children = SYSCTL_CHILDREN(ctx_oid); 1534 1535 /* Tx Related */ 1536 1537 SYSCTL_ADD_QUAD(ctx, node_children, 1538 OID_AUTO, "tx_pkts_processed", 1539 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1540 "No. 
of packets processed for transmission"); 1541 1542 SYSCTL_ADD_QUAD(ctx, node_children, 1543 OID_AUTO, "tx_pkts_freed", 1544 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1545 "No. of freed packets"); 1546 1547 SYSCTL_ADD_QUAD(ctx, node_children, 1548 OID_AUTO, "tx_pkts_transmitted", 1549 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1550 "No. of transmitted packets"); 1551 1552 SYSCTL_ADD_QUAD(ctx, node_children, 1553 OID_AUTO, "tx_pkts_completed", 1554 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1555 "No. of transmit completions"); 1556 1557 SYSCTL_ADD_QUAD(ctx, node_children, 1558 OID_AUTO, "tx_non_tso_pkts", 1559 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1560 "No. of non LSO transmited packets"); 1561 1562 #ifdef QLNX_TRACE_PERF_DATA 1563 1564 SYSCTL_ADD_QUAD(ctx, node_children, 1565 OID_AUTO, "tx_pkts_trans_ctx", 1566 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1567 "No. of transmitted packets in transmit context"); 1568 1569 SYSCTL_ADD_QUAD(ctx, node_children, 1570 OID_AUTO, "tx_pkts_compl_ctx", 1571 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1572 "No. of transmit completions in transmit context"); 1573 1574 SYSCTL_ADD_QUAD(ctx, node_children, 1575 OID_AUTO, "tx_pkts_trans_fp", 1576 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1577 "No. of transmitted packets in taskqueue"); 1578 1579 SYSCTL_ADD_QUAD(ctx, node_children, 1580 OID_AUTO, "tx_pkts_compl_fp", 1581 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1582 "No. of transmit completions in taskqueue"); 1583 1584 SYSCTL_ADD_QUAD(ctx, node_children, 1585 OID_AUTO, "tx_pkts_compl_intr", 1586 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1587 "No. of transmit completions in interrupt ctx"); 1588 #endif 1589 1590 SYSCTL_ADD_QUAD(ctx, node_children, 1591 OID_AUTO, "tx_tso_pkts", 1592 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1593 "No. 
of LSO transmited packets"); 1594 1595 SYSCTL_ADD_QUAD(ctx, node_children, 1596 OID_AUTO, "tx_lso_wnd_min_len", 1597 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1598 "tx_lso_wnd_min_len"); 1599 1600 SYSCTL_ADD_QUAD(ctx, node_children, 1601 OID_AUTO, "tx_defrag", 1602 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1603 "tx_defrag"); 1604 1605 SYSCTL_ADD_QUAD(ctx, node_children, 1606 OID_AUTO, "tx_nsegs_gt_elem_left", 1607 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1608 "tx_nsegs_gt_elem_left"); 1609 1610 SYSCTL_ADD_UINT(ctx, node_children, 1611 OID_AUTO, "tx_tso_max_nsegs", 1612 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1613 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1614 1615 SYSCTL_ADD_UINT(ctx, node_children, 1616 OID_AUTO, "tx_tso_min_nsegs", 1617 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1618 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1619 1620 SYSCTL_ADD_UINT(ctx, node_children, 1621 OID_AUTO, "tx_tso_max_pkt_len", 1622 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1623 ha->fp_array[i].tx_tso_max_pkt_len, 1624 "tx_tso_max_pkt_len"); 1625 1626 SYSCTL_ADD_UINT(ctx, node_children, 1627 OID_AUTO, "tx_tso_min_pkt_len", 1628 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1629 ha->fp_array[i].tx_tso_min_pkt_len, 1630 "tx_tso_min_pkt_len"); 1631 1632 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1633 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1634 snprintf(name_str, sizeof(name_str), 1635 "tx_pkts_nseg_%02d", (j+1)); 1636 1637 SYSCTL_ADD_QUAD(ctx, node_children, 1638 OID_AUTO, name_str, CTLFLAG_RD, 1639 &ha->fp_array[i].tx_pkts[j], name_str); 1640 } 1641 1642 #ifdef QLNX_TRACE_PERF_DATA 1643 for (j = 0; j < 18; j++) { 1644 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1645 snprintf(name_str, sizeof(name_str), 1646 "tx_pkts_hist_%02d", (j+1)); 1647 1648 SYSCTL_ADD_QUAD(ctx, node_children, 1649 OID_AUTO, name_str, CTLFLAG_RD, 1650 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1651 } 1652 for (j = 0; j < 5; j++) { 1653 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1654 snprintf(name_str, sizeof(name_str), 1655 "tx_comInt_%02d", (j+1)); 1656 1657 SYSCTL_ADD_QUAD(ctx, node_children, 1658 OID_AUTO, name_str, CTLFLAG_RD, 1659 &ha->fp_array[i].tx_comInt[j], name_str); 1660 } 1661 for (j = 0; j < 18; j++) { 1662 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1663 snprintf(name_str, sizeof(name_str), 1664 "tx_pkts_q_%02d", (j+1)); 1665 1666 SYSCTL_ADD_QUAD(ctx, node_children, 1667 OID_AUTO, name_str, CTLFLAG_RD, 1668 &ha->fp_array[i].tx_pkts_q[j], name_str); 1669 } 1670 #endif 1671 1672 SYSCTL_ADD_QUAD(ctx, node_children, 1673 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1674 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1675 "err_tx_nsegs_gt_elem_left"); 1676 1677 SYSCTL_ADD_QUAD(ctx, node_children, 1678 OID_AUTO, "err_tx_dmamap_create", 1679 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1680 "err_tx_dmamap_create"); 1681 1682 SYSCTL_ADD_QUAD(ctx, node_children, 1683 OID_AUTO, "err_tx_defrag_dmamap_load", 1684 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1685 "err_tx_defrag_dmamap_load"); 1686 1687 SYSCTL_ADD_QUAD(ctx, node_children, 1688 OID_AUTO, "err_tx_non_tso_max_seg", 1689 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1690 "err_tx_non_tso_max_seg"); 1691 1692 SYSCTL_ADD_QUAD(ctx, node_children, 1693 OID_AUTO, "err_tx_dmamap_load", 1694 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1695 "err_tx_dmamap_load"); 1696 1697 SYSCTL_ADD_QUAD(ctx, node_children, 1698 OID_AUTO, "err_tx_defrag", 1699 
CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1700 "err_tx_defrag"); 1701 1702 SYSCTL_ADD_QUAD(ctx, node_children, 1703 OID_AUTO, "err_tx_free_pkt_null", 1704 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1705 "err_tx_free_pkt_null"); 1706 1707 SYSCTL_ADD_QUAD(ctx, node_children, 1708 OID_AUTO, "err_tx_cons_idx_conflict", 1709 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1710 "err_tx_cons_idx_conflict"); 1711 1712 SYSCTL_ADD_QUAD(ctx, node_children, 1713 OID_AUTO, "lro_cnt_64", 1714 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1715 "lro_cnt_64"); 1716 1717 SYSCTL_ADD_QUAD(ctx, node_children, 1718 OID_AUTO, "lro_cnt_128", 1719 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1720 "lro_cnt_128"); 1721 1722 SYSCTL_ADD_QUAD(ctx, node_children, 1723 OID_AUTO, "lro_cnt_256", 1724 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1725 "lro_cnt_256"); 1726 1727 SYSCTL_ADD_QUAD(ctx, node_children, 1728 OID_AUTO, "lro_cnt_512", 1729 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1730 "lro_cnt_512"); 1731 1732 SYSCTL_ADD_QUAD(ctx, node_children, 1733 OID_AUTO, "lro_cnt_1024", 1734 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1735 "lro_cnt_1024"); 1736 1737 /* Rx Related */ 1738 1739 SYSCTL_ADD_QUAD(ctx, node_children, 1740 OID_AUTO, "rx_pkts", 1741 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1742 "No. of received packets"); 1743 1744 SYSCTL_ADD_QUAD(ctx, node_children, 1745 OID_AUTO, "tpa_start", 1746 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1747 "No. of tpa_start packets"); 1748 1749 SYSCTL_ADD_QUAD(ctx, node_children, 1750 OID_AUTO, "tpa_cont", 1751 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1752 "No. of tpa_cont packets"); 1753 1754 SYSCTL_ADD_QUAD(ctx, node_children, 1755 OID_AUTO, "tpa_end", 1756 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1757 "No. of tpa_end packets"); 1758 1759 SYSCTL_ADD_QUAD(ctx, node_children, 1760 OID_AUTO, "err_m_getcl", 1761 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1762 "err_m_getcl"); 1763 1764 SYSCTL_ADD_QUAD(ctx, node_children, 1765 OID_AUTO, "err_m_getjcl", 1766 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1767 "err_m_getjcl"); 1768 1769 SYSCTL_ADD_QUAD(ctx, node_children, 1770 OID_AUTO, "err_rx_hw_errors", 1771 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1772 "err_rx_hw_errors"); 1773 1774 SYSCTL_ADD_QUAD(ctx, node_children, 1775 OID_AUTO, "err_rx_alloc_errors", 1776 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1777 "err_rx_alloc_errors"); 1778 } 1779 1780 return; 1781 } 1782 1783 static void 1784 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1785 { 1786 struct sysctl_ctx_list *ctx; 1787 struct sysctl_oid_list *children; 1788 struct sysctl_oid *ctx_oid; 1789 1790 ctx = device_get_sysctl_ctx(ha->pci_dev); 1791 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1792 1793 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1794 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); 1795 children = SYSCTL_CHILDREN(ctx_oid); 1796 1797 SYSCTL_ADD_QUAD(ctx, children, 1798 OID_AUTO, "no_buff_discards", 1799 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1800 "No. of packets discarded due to lack of buffer"); 1801 1802 SYSCTL_ADD_QUAD(ctx, children, 1803 OID_AUTO, "packet_too_big_discard", 1804 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1805 "No. 
of packets discarded because packet was too big"); 1806 1807 SYSCTL_ADD_QUAD(ctx, children, 1808 OID_AUTO, "ttl0_discard", 1809 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1810 "ttl0_discard"); 1811 1812 SYSCTL_ADD_QUAD(ctx, children, 1813 OID_AUTO, "rx_ucast_bytes", 1814 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1815 "rx_ucast_bytes"); 1816 1817 SYSCTL_ADD_QUAD(ctx, children, 1818 OID_AUTO, "rx_mcast_bytes", 1819 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1820 "rx_mcast_bytes"); 1821 1822 SYSCTL_ADD_QUAD(ctx, children, 1823 OID_AUTO, "rx_bcast_bytes", 1824 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1825 "rx_bcast_bytes"); 1826 1827 SYSCTL_ADD_QUAD(ctx, children, 1828 OID_AUTO, "rx_ucast_pkts", 1829 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1830 "rx_ucast_pkts"); 1831 1832 SYSCTL_ADD_QUAD(ctx, children, 1833 OID_AUTO, "rx_mcast_pkts", 1834 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1835 "rx_mcast_pkts"); 1836 1837 SYSCTL_ADD_QUAD(ctx, children, 1838 OID_AUTO, "rx_bcast_pkts", 1839 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1840 "rx_bcast_pkts"); 1841 1842 SYSCTL_ADD_QUAD(ctx, children, 1843 OID_AUTO, "mftag_filter_discards", 1844 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1845 "mftag_filter_discards"); 1846 1847 SYSCTL_ADD_QUAD(ctx, children, 1848 OID_AUTO, "mac_filter_discards", 1849 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1850 "mac_filter_discards"); 1851 1852 SYSCTL_ADD_QUAD(ctx, children, 1853 OID_AUTO, "tx_ucast_bytes", 1854 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1855 "tx_ucast_bytes"); 1856 1857 SYSCTL_ADD_QUAD(ctx, children, 1858 OID_AUTO, "tx_mcast_bytes", 1859 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1860 "tx_mcast_bytes"); 1861 1862 SYSCTL_ADD_QUAD(ctx, children, 1863 OID_AUTO, "tx_bcast_bytes", 1864 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1865 "tx_bcast_bytes"); 1866 1867 SYSCTL_ADD_QUAD(ctx, children, 1868 OID_AUTO, "tx_ucast_pkts", 1869 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1870 "tx_ucast_pkts"); 1871 1872 SYSCTL_ADD_QUAD(ctx, children, 1873 OID_AUTO, "tx_mcast_pkts", 1874 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1875 "tx_mcast_pkts"); 1876 1877 SYSCTL_ADD_QUAD(ctx, children, 1878 OID_AUTO, "tx_bcast_pkts", 1879 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1880 "tx_bcast_pkts"); 1881 1882 SYSCTL_ADD_QUAD(ctx, children, 1883 OID_AUTO, "tx_err_drop_pkts", 1884 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1885 "tx_err_drop_pkts"); 1886 1887 SYSCTL_ADD_QUAD(ctx, children, 1888 OID_AUTO, "tpa_coalesced_pkts", 1889 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1890 "tpa_coalesced_pkts"); 1891 1892 SYSCTL_ADD_QUAD(ctx, children, 1893 OID_AUTO, "tpa_coalesced_events", 1894 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1895 "tpa_coalesced_events"); 1896 1897 SYSCTL_ADD_QUAD(ctx, children, 1898 OID_AUTO, "tpa_aborts_num", 1899 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1900 "tpa_aborts_num"); 1901 1902 SYSCTL_ADD_QUAD(ctx, children, 1903 OID_AUTO, "tpa_not_coalesced_pkts", 1904 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1905 "tpa_not_coalesced_pkts"); 1906 1907 SYSCTL_ADD_QUAD(ctx, children, 1908 OID_AUTO, "tpa_coalesced_bytes", 1909 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1910 "tpa_coalesced_bytes"); 1911 1912 SYSCTL_ADD_QUAD(ctx, children, 1913 OID_AUTO, "rx_64_byte_packets", 1914 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1915 "rx_64_byte_packets"); 1916 1917 SYSCTL_ADD_QUAD(ctx, children, 1918 OID_AUTO, 
"rx_65_to_127_byte_packets", 1919 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1920 "rx_65_to_127_byte_packets"); 1921 1922 SYSCTL_ADD_QUAD(ctx, children, 1923 OID_AUTO, "rx_128_to_255_byte_packets", 1924 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1925 "rx_128_to_255_byte_packets"); 1926 1927 SYSCTL_ADD_QUAD(ctx, children, 1928 OID_AUTO, "rx_256_to_511_byte_packets", 1929 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1930 "rx_256_to_511_byte_packets"); 1931 1932 SYSCTL_ADD_QUAD(ctx, children, 1933 OID_AUTO, "rx_512_to_1023_byte_packets", 1934 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1935 "rx_512_to_1023_byte_packets"); 1936 1937 SYSCTL_ADD_QUAD(ctx, children, 1938 OID_AUTO, "rx_1024_to_1518_byte_packets", 1939 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1940 "rx_1024_to_1518_byte_packets"); 1941 1942 SYSCTL_ADD_QUAD(ctx, children, 1943 OID_AUTO, "rx_1519_to_1522_byte_packets", 1944 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1945 "rx_1519_to_1522_byte_packets"); 1946 1947 SYSCTL_ADD_QUAD(ctx, children, 1948 OID_AUTO, "rx_1523_to_2047_byte_packets", 1949 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1950 "rx_1523_to_2047_byte_packets"); 1951 1952 SYSCTL_ADD_QUAD(ctx, children, 1953 OID_AUTO, "rx_2048_to_4095_byte_packets", 1954 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1955 "rx_2048_to_4095_byte_packets"); 1956 1957 SYSCTL_ADD_QUAD(ctx, children, 1958 OID_AUTO, "rx_4096_to_9216_byte_packets", 1959 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1960 "rx_4096_to_9216_byte_packets"); 1961 1962 SYSCTL_ADD_QUAD(ctx, children, 1963 OID_AUTO, "rx_9217_to_16383_byte_packets", 1964 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1965 "rx_9217_to_16383_byte_packets"); 1966 1967 SYSCTL_ADD_QUAD(ctx, children, 1968 OID_AUTO, "rx_crc_errors", 1969 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1970 "rx_crc_errors"); 1971 1972 SYSCTL_ADD_QUAD(ctx, children, 1973 OID_AUTO, "rx_mac_crtl_frames", 1974 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1975 "rx_mac_crtl_frames"); 1976 1977 SYSCTL_ADD_QUAD(ctx, children, 1978 OID_AUTO, "rx_pause_frames", 1979 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1980 "rx_pause_frames"); 1981 1982 SYSCTL_ADD_QUAD(ctx, children, 1983 OID_AUTO, "rx_pfc_frames", 1984 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1985 "rx_pfc_frames"); 1986 1987 SYSCTL_ADD_QUAD(ctx, children, 1988 OID_AUTO, "rx_align_errors", 1989 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1990 "rx_align_errors"); 1991 1992 SYSCTL_ADD_QUAD(ctx, children, 1993 OID_AUTO, "rx_carrier_errors", 1994 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1995 "rx_carrier_errors"); 1996 1997 SYSCTL_ADD_QUAD(ctx, children, 1998 OID_AUTO, "rx_oversize_packets", 1999 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2000 "rx_oversize_packets"); 2001 2002 SYSCTL_ADD_QUAD(ctx, children, 2003 OID_AUTO, "rx_jabbers", 2004 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2005 "rx_jabbers"); 2006 2007 SYSCTL_ADD_QUAD(ctx, children, 2008 OID_AUTO, "rx_undersize_packets", 2009 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2010 "rx_undersize_packets"); 2011 2012 SYSCTL_ADD_QUAD(ctx, children, 2013 OID_AUTO, "rx_fragments", 2014 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2015 "rx_fragments"); 2016 2017 SYSCTL_ADD_QUAD(ctx, children, 2018 OID_AUTO, "tx_64_byte_packets", 2019 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2020 
"tx_64_byte_packets"); 2021 2022 SYSCTL_ADD_QUAD(ctx, children, 2023 OID_AUTO, "tx_65_to_127_byte_packets", 2024 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2025 "tx_65_to_127_byte_packets"); 2026 2027 SYSCTL_ADD_QUAD(ctx, children, 2028 OID_AUTO, "tx_128_to_255_byte_packets", 2029 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2030 "tx_128_to_255_byte_packets"); 2031 2032 SYSCTL_ADD_QUAD(ctx, children, 2033 OID_AUTO, "tx_256_to_511_byte_packets", 2034 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2035 "tx_256_to_511_byte_packets"); 2036 2037 SYSCTL_ADD_QUAD(ctx, children, 2038 OID_AUTO, "tx_512_to_1023_byte_packets", 2039 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2040 "tx_512_to_1023_byte_packets"); 2041 2042 SYSCTL_ADD_QUAD(ctx, children, 2043 OID_AUTO, "tx_1024_to_1518_byte_packets", 2044 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2045 "tx_1024_to_1518_byte_packets"); 2046 2047 SYSCTL_ADD_QUAD(ctx, children, 2048 OID_AUTO, "tx_1519_to_2047_byte_packets", 2049 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2050 "tx_1519_to_2047_byte_packets"); 2051 2052 SYSCTL_ADD_QUAD(ctx, children, 2053 OID_AUTO, "tx_2048_to_4095_byte_packets", 2054 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2055 "tx_2048_to_4095_byte_packets"); 2056 2057 SYSCTL_ADD_QUAD(ctx, children, 2058 OID_AUTO, "tx_4096_to_9216_byte_packets", 2059 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2060 "tx_4096_to_9216_byte_packets"); 2061 2062 SYSCTL_ADD_QUAD(ctx, children, 2063 OID_AUTO, "tx_9217_to_16383_byte_packets", 2064 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2065 "tx_9217_to_16383_byte_packets"); 2066 2067 SYSCTL_ADD_QUAD(ctx, children, 2068 OID_AUTO, "tx_pause_frames", 2069 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2070 "tx_pause_frames"); 2071 2072 SYSCTL_ADD_QUAD(ctx, children, 2073 OID_AUTO, "tx_pfc_frames", 2074 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2075 "tx_pfc_frames"); 2076 2077 SYSCTL_ADD_QUAD(ctx, children, 2078 OID_AUTO, "tx_lpi_entry_count", 2079 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2080 "tx_lpi_entry_count"); 2081 2082 SYSCTL_ADD_QUAD(ctx, children, 2083 OID_AUTO, "tx_total_collisions", 2084 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2085 "tx_total_collisions"); 2086 2087 SYSCTL_ADD_QUAD(ctx, children, 2088 OID_AUTO, "brb_truncates", 2089 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2090 "brb_truncates"); 2091 2092 SYSCTL_ADD_QUAD(ctx, children, 2093 OID_AUTO, "brb_discards", 2094 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2095 "brb_discards"); 2096 2097 SYSCTL_ADD_QUAD(ctx, children, 2098 OID_AUTO, "rx_mac_bytes", 2099 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2100 "rx_mac_bytes"); 2101 2102 SYSCTL_ADD_QUAD(ctx, children, 2103 OID_AUTO, "rx_mac_uc_packets", 2104 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2105 "rx_mac_uc_packets"); 2106 2107 SYSCTL_ADD_QUAD(ctx, children, 2108 OID_AUTO, "rx_mac_mc_packets", 2109 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2110 "rx_mac_mc_packets"); 2111 2112 SYSCTL_ADD_QUAD(ctx, children, 2113 OID_AUTO, "rx_mac_bc_packets", 2114 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2115 "rx_mac_bc_packets"); 2116 2117 SYSCTL_ADD_QUAD(ctx, children, 2118 OID_AUTO, "rx_mac_frames_ok", 2119 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2120 "rx_mac_frames_ok"); 2121 2122 SYSCTL_ADD_QUAD(ctx, children, 2123 OID_AUTO, "tx_mac_bytes", 2124 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2125 "tx_mac_bytes"); 2126 2127 SYSCTL_ADD_QUAD(ctx, children, 2128 OID_AUTO, "tx_mac_uc_packets", 2129 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2130 "tx_mac_uc_packets"); 2131 2132 SYSCTL_ADD_QUAD(ctx, children, 2133 OID_AUTO, "tx_mac_mc_packets", 2134 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2135 "tx_mac_mc_packets"); 2136 2137 SYSCTL_ADD_QUAD(ctx, children, 2138 OID_AUTO, "tx_mac_bc_packets", 2139 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2140 "tx_mac_bc_packets"); 2141 2142 SYSCTL_ADD_QUAD(ctx, children, 2143 OID_AUTO, "tx_mac_ctrl_frames", 2144 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2145 "tx_mac_ctrl_frames"); 2146 return; 2147 } 2148 2149 static void 2150 qlnx_add_sysctls(qlnx_host_t *ha) 2151 { 2152 device_t dev = ha->pci_dev; 2153 struct sysctl_ctx_list *ctx; 2154 struct sysctl_oid_list *children; 2155 2156 ctx = device_get_sysctl_ctx(dev); 2157 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2158 2159 qlnx_add_fp_stats_sysctls(ha); 2160 qlnx_add_sp_stats_sysctls(ha); 2161 2162 if (qlnx_vf_device(ha) != 0) 2163 qlnx_add_hw_stats_sysctls(ha); 2164 2165 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2166 CTLFLAG_RD, qlnx_ver_str, 0, 2167 "Driver Version"); 2168 2169 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2170 CTLFLAG_RD, ha->stormfw_ver, 0, 2171 "STORM Firmware Version"); 2172 2173 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2174 CTLFLAG_RD, ha->mfw_ver, 0, 2175 "Management Firmware Version"); 2176 2177 SYSCTL_ADD_UINT(ctx, children, 2178 OID_AUTO, "personality", CTLFLAG_RD, 2179 &ha->personality, ha->personality, 2180 "\tpersonality = 0 => Ethernet Only\n" 2181 "\tpersonality = 3 => Ethernet and RoCE\n" 2182 "\tpersonality = 4 => Ethernet and iWARP\n" 2183 "\tpersonality = 6 => Default in Shared Memory\n"); 2184 2185 ha->dbg_level = 0; 2186 SYSCTL_ADD_UINT(ctx, children, 2187 OID_AUTO, "debug", CTLFLAG_RW, 2188 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2189 2190 ha->dp_level = 0x01; 2191 SYSCTL_ADD_UINT(ctx, children, 2192 OID_AUTO, "dp_level", CTLFLAG_RW, 2193 &ha->dp_level, ha->dp_level, "DP Level"); 2194 2195 ha->dbg_trace_lro_cnt = 0; 2196 SYSCTL_ADD_UINT(ctx, children, 2197 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2198 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2199 "Trace LRO Counts"); 2200 2201 ha->dbg_trace_tso_pkt_len = 0; 2202 SYSCTL_ADD_UINT(ctx, children, 2203 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2204 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2205 "Trace TSO packet lengths"); 2206 2207 ha->dp_module = 0; 2208 SYSCTL_ADD_UINT(ctx, children, 2209 OID_AUTO, "dp_module", CTLFLAG_RW, 2210 &ha->dp_module, ha->dp_module, "DP Module"); 2211 2212 ha->err_inject = 0; 2213 2214 SYSCTL_ADD_UINT(ctx, children, 2215 OID_AUTO, "err_inject", CTLFLAG_RW, 2216 &ha->err_inject, ha->err_inject, "Error Inject"); 2217 2218 ha->storm_stats_enable = 0; 2219 2220 SYSCTL_ADD_UINT(ctx, children, 2221 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2222 &ha->storm_stats_enable, ha->storm_stats_enable, 2223 "Enable Storm Statistics Gathering"); 2224 2225 ha->storm_stats_index = 0; 2226 2227 SYSCTL_ADD_UINT(ctx, children, 2228 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2229 &ha->storm_stats_index, ha->storm_stats_index, 2230 "Enable Storm Statistics Gathering Current Index"); 2231 2232 ha->grcdump_taken = 0; 2233 SYSCTL_ADD_UINT(ctx, children, 2234 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2235 &ha->grcdump_taken, ha->grcdump_taken, 
2236 "grcdump_taken"); 2237 2238 ha->idle_chk_taken = 0; 2239 SYSCTL_ADD_UINT(ctx, children, 2240 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2241 &ha->idle_chk_taken, ha->idle_chk_taken, 2242 "idle_chk_taken"); 2243 2244 SYSCTL_ADD_UINT(ctx, children, 2245 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2246 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2247 "rx_coalesce_usecs"); 2248 2249 SYSCTL_ADD_UINT(ctx, children, 2250 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2251 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2252 "tx_coalesce_usecs"); 2253 2254 SYSCTL_ADD_PROC(ctx, children, 2255 OID_AUTO, "trigger_dump", 2256 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2257 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2258 2259 SYSCTL_ADD_PROC(ctx, children, 2260 OID_AUTO, "set_rx_coalesce_usecs", 2261 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2262 (void *)ha, 0, qlnx_set_rx_coalesce, "I", 2263 "rx interrupt coalesce period microseconds"); 2264 2265 SYSCTL_ADD_PROC(ctx, children, 2266 OID_AUTO, "set_tx_coalesce_usecs", 2267 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2268 (void *)ha, 0, qlnx_set_tx_coalesce, "I", 2269 "tx interrupt coalesce period microseconds"); 2270 2271 ha->rx_pkt_threshold = 128; 2272 SYSCTL_ADD_UINT(ctx, children, 2273 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2274 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2275 "No. of Rx Pkts to process at a time"); 2276 2277 ha->rx_jumbo_buf_eq_mtu = 0; 2278 SYSCTL_ADD_UINT(ctx, children, 2279 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2280 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2281 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2282 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2283 2284 SYSCTL_ADD_QUAD(ctx, children, 2285 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2286 &ha->err_illegal_intr, "err_illegal_intr"); 2287 2288 SYSCTL_ADD_QUAD(ctx, children, 2289 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2290 &ha->err_fp_null, "err_fp_null"); 2291 2292 SYSCTL_ADD_QUAD(ctx, children, 2293 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2294 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2295 return; 2296 } 2297 2298 /***************************************************************************** 2299 * Operating System Network Interface Functions 2300 *****************************************************************************/ 2301 2302 static void 2303 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2304 { 2305 uint16_t device_id; 2306 if_t ifp; 2307 2308 ifp = ha->ifp = if_alloc(IFT_ETHER); 2309 2310 if (ifp == NULL) 2311 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2312 2313 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2314 2315 device_id = pci_get_device(ha->pci_dev); 2316 2317 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2318 if_setbaudrate(ifp, IF_Gbps(40)); 2319 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2320 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2321 if_setbaudrate(ifp, IF_Gbps(25)); 2322 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2323 if_setbaudrate(ifp, IF_Gbps(50)); 2324 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2325 if_setbaudrate(ifp, IF_Gbps(100)); 2326 2327 if_setcapabilities(ifp, IFCAP_LINKSTATE); 2328 2329 if_setinitfn(ifp, qlnx_init); 2330 if_setsoftc(ifp, ha); 2331 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 2332 if_setioctlfn(ifp, qlnx_ioctl); 2333 if_settransmitfn(ifp, qlnx_transmit); 2334 if_setqflushfn(ifp, qlnx_qflush); 2335 2336 if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha)); 
2337 if_setsendqready(ifp); 2338 2339 if_setgetcounterfn(ifp, qlnx_get_counter); 2340 2341 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 2342 2343 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2344 2345 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2346 !ha->primary_mac[2] && !ha->primary_mac[3] && 2347 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2348 uint32_t rnd; 2349 2350 rnd = arc4random(); 2351 2352 ha->primary_mac[0] = 0x00; 2353 ha->primary_mac[1] = 0x0e; 2354 ha->primary_mac[2] = 0x1e; 2355 ha->primary_mac[3] = rnd & 0xFF; 2356 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2357 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2358 } 2359 2360 ether_ifattach(ifp, ha->primary_mac); 2361 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2362 2363 if_setcapabilities(ifp, IFCAP_HWCSUM); 2364 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); 2365 2366 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 2367 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); 2368 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); 2369 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0); 2370 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); 2371 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); 2372 if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0); 2373 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); 2374 2375 if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE - 2376 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); 2377 if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */ 2378 if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE); 2379 2380 if_setcapenable(ifp, if_getcapabilities(ifp)); 2381 2382 if_sethwassist(ifp, CSUM_IP); 2383 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); 2384 if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0); 2385 if_sethwassistbits(ifp, CSUM_TSO, 0); 2386 2387 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 2388 2389 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2390 qlnx_media_status); 2391 2392 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2393 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2394 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2395 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2396 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2397 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2398 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2399 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2400 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2401 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2402 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2403 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2404 ifmedia_add(&ha->media, 2405 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2406 ifmedia_add(&ha->media, 2407 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2408 ifmedia_add(&ha->media, 2409 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2410 } 2411 2412 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2413 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2414 2415 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2416 2417 QL_DPRINT2(ha, "exit\n"); 2418 2419 return; 2420 } 2421 2422 static void 2423 qlnx_init_locked(qlnx_host_t *ha) 2424 { 2425 if_t ifp = ha->ifp; 2426 2427 QL_DPRINT1(ha, "Driver Initialization start \n"); 2428 2429 qlnx_stop(ha); 2430 2431 if (qlnx_load(ha) == 0) { 2432 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 2433 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2434 2435 #ifdef QLNX_ENABLE_IWARP 2436 if 
(qlnx_vf_device(ha) != 0) { 2437 qlnx_rdma_dev_open(ha); 2438 } 2439 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2440 } 2441 2442 return; 2443 } 2444 2445 static void 2446 qlnx_init(void *arg) 2447 { 2448 qlnx_host_t *ha; 2449 2450 ha = (qlnx_host_t *)arg; 2451 2452 QL_DPRINT2(ha, "enter\n"); 2453 2454 QLNX_LOCK(ha); 2455 qlnx_init_locked(ha); 2456 QLNX_UNLOCK(ha); 2457 2458 QL_DPRINT2(ha, "exit\n"); 2459 2460 return; 2461 } 2462 2463 static int 2464 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2465 { 2466 struct ecore_filter_mcast *mcast; 2467 struct ecore_dev *cdev; 2468 int rc; 2469 2470 cdev = &ha->cdev; 2471 2472 mcast = &ha->ecore_mcast; 2473 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2474 2475 if (add_mac) 2476 mcast->opcode = ECORE_FILTER_ADD; 2477 else 2478 mcast->opcode = ECORE_FILTER_REMOVE; 2479 2480 mcast->num_mc_addrs = 1; 2481 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2482 2483 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2484 2485 return (rc); 2486 } 2487 2488 static int 2489 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2490 { 2491 int i; 2492 2493 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2494 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2495 return 0; /* its been already added */ 2496 } 2497 2498 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2499 if ((ha->mcast[i].addr[0] == 0) && 2500 (ha->mcast[i].addr[1] == 0) && 2501 (ha->mcast[i].addr[2] == 0) && 2502 (ha->mcast[i].addr[3] == 0) && 2503 (ha->mcast[i].addr[4] == 0) && 2504 (ha->mcast[i].addr[5] == 0)) { 2505 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2506 return (-1); 2507 2508 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2509 ha->nmcast++; 2510 2511 return 0; 2512 } 2513 } 2514 return 0; 2515 } 2516 2517 static int 2518 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2519 { 2520 int i; 2521 2522 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2523 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2524 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2525 return (-1); 2526 2527 ha->mcast[i].addr[0] = 0; 2528 ha->mcast[i].addr[1] = 0; 2529 ha->mcast[i].addr[2] = 0; 2530 ha->mcast[i].addr[3] = 0; 2531 ha->mcast[i].addr[4] = 0; 2532 ha->mcast[i].addr[5] = 0; 2533 2534 ha->nmcast--; 2535 2536 return 0; 2537 } 2538 } 2539 return 0; 2540 } 2541 2542 /* 2543 * Name: qls_hw_set_multi 2544 * Function: Sets the Multicast Addresses provided the host O.S into the 2545 * hardware (for the given interface) 2546 */ 2547 static void 2548 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2549 uint32_t add_mac) 2550 { 2551 int i; 2552 2553 for (i = 0; i < mcnt; i++) { 2554 if (add_mac) { 2555 if (qlnx_hw_add_mcast(ha, mta)) 2556 break; 2557 } else { 2558 if (qlnx_hw_del_mcast(ha, mta)) 2559 break; 2560 } 2561 2562 mta += ETHER_HDR_LEN; 2563 } 2564 return; 2565 } 2566 2567 static u_int 2568 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 2569 { 2570 uint8_t *mta = arg; 2571 2572 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2573 return (0); 2574 2575 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2576 2577 return (1); 2578 } 2579 2580 static int 2581 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2582 { 2583 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; 2584 if_t ifp = ha->ifp; 2585 u_int mcnt; 2586 2587 if (qlnx_vf_device(ha) == 0) 2588 return (0); 2589 2590 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); 2591 2592 QLNX_LOCK(ha); 2593 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2594 
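	/*
	 * mta[] is packed by qlnx_copy_maddr() at an ETHER_HDR_LEN-byte stride
	 * (not ETH_ALEN) and qlnx_hw_set_multi() walks it with the same stride,
	 * so the two must stay in sync; the list is capped at
	 * QLNX_MAX_NUM_MULTICAST_ADDRS entries.
	 */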
QLNX_UNLOCK(ha); 2595 2596 return (0); 2597 } 2598 2599 static int 2600 qlnx_set_promisc(qlnx_host_t *ha) 2601 { 2602 int rc = 0; 2603 uint8_t filter; 2604 2605 if (qlnx_vf_device(ha) == 0) 2606 return (0); 2607 2608 filter = ha->filter; 2609 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2610 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2611 2612 rc = qlnx_set_rx_accept_filter(ha, filter); 2613 return (rc); 2614 } 2615 2616 static int 2617 qlnx_set_allmulti(qlnx_host_t *ha) 2618 { 2619 int rc = 0; 2620 uint8_t filter; 2621 2622 if (qlnx_vf_device(ha) == 0) 2623 return (0); 2624 2625 filter = ha->filter; 2626 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2627 rc = qlnx_set_rx_accept_filter(ha, filter); 2628 2629 return (rc); 2630 } 2631 2632 static int 2633 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data) 2634 { 2635 int ret = 0, mask; 2636 struct ifreq *ifr = (struct ifreq *)data; 2637 #ifdef INET 2638 struct ifaddr *ifa = (struct ifaddr *)data; 2639 #endif 2640 qlnx_host_t *ha; 2641 2642 ha = (qlnx_host_t *)if_getsoftc(ifp); 2643 2644 switch (cmd) { 2645 case SIOCSIFADDR: 2646 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2647 2648 #ifdef INET 2649 if (ifa->ifa_addr->sa_family == AF_INET) { 2650 if_setflagbits(ifp, IFF_UP, 0); 2651 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 2652 QLNX_LOCK(ha); 2653 qlnx_init_locked(ha); 2654 QLNX_UNLOCK(ha); 2655 } 2656 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2657 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2658 2659 arp_ifinit(ifp, ifa); 2660 break; 2661 } 2662 #endif 2663 ether_ioctl(ifp, cmd, data); 2664 break; 2665 2666 case SIOCSIFMTU: 2667 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2668 2669 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2670 ret = EINVAL; 2671 } else { 2672 QLNX_LOCK(ha); 2673 if_setmtu(ifp, ifr->ifr_mtu); 2674 ha->max_frame_size = 2675 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 2676 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2677 qlnx_init_locked(ha); 2678 } 2679 2680 QLNX_UNLOCK(ha); 2681 } 2682 2683 break; 2684 2685 case SIOCSIFFLAGS: 2686 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2687 2688 QLNX_LOCK(ha); 2689 2690 if (if_getflags(ifp) & IFF_UP) { 2691 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2692 if ((if_getflags(ifp) ^ ha->if_flags) & 2693 IFF_PROMISC) { 2694 ret = qlnx_set_promisc(ha); 2695 } else if ((if_getflags(ifp) ^ ha->if_flags) & 2696 IFF_ALLMULTI) { 2697 ret = qlnx_set_allmulti(ha); 2698 } 2699 } else { 2700 ha->max_frame_size = if_getmtu(ifp) + 2701 ETHER_HDR_LEN + ETHER_CRC_LEN; 2702 qlnx_init_locked(ha); 2703 } 2704 } else { 2705 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2706 qlnx_stop(ha); 2707 ha->if_flags = if_getflags(ifp); 2708 } 2709 2710 QLNX_UNLOCK(ha); 2711 break; 2712 2713 case SIOCADDMULTI: 2714 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2715 2716 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2717 if (qlnx_set_multi(ha, 1)) 2718 ret = EINVAL; 2719 } 2720 break; 2721 2722 case SIOCDELMULTI: 2723 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2724 2725 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2726 if (qlnx_set_multi(ha, 0)) 2727 ret = EINVAL; 2728 } 2729 break; 2730 2731 case SIOCSIFMEDIA: 2732 case SIOCGIFMEDIA: 2733 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2734 2735 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2736 break; 2737 2738 case SIOCSIFCAP: 2739 2740 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 2741 2742 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2743 2744 if (mask & IFCAP_HWCSUM) 2745 if_togglecapenable(ifp, IFCAP_HWCSUM); 2746 if (mask & 
IFCAP_TSO4) 2747 if_togglecapenable(ifp, IFCAP_TSO4); 2748 if (mask & IFCAP_TSO6) 2749 if_togglecapenable(ifp, IFCAP_TSO6); 2750 if (mask & IFCAP_VLAN_HWTAGGING) 2751 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2752 if (mask & IFCAP_VLAN_HWTSO) 2753 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 2754 if (mask & IFCAP_LRO) 2755 if_togglecapenable(ifp, IFCAP_LRO); 2756 2757 QLNX_LOCK(ha); 2758 2759 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2760 qlnx_init_locked(ha); 2761 2762 QLNX_UNLOCK(ha); 2763 2764 VLAN_CAPABILITIES(ifp); 2765 break; 2766 2767 case SIOCGI2C: 2768 { 2769 struct ifi2creq i2c; 2770 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2771 struct ecore_ptt *p_ptt; 2772 2773 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2774 2775 if (ret) 2776 break; 2777 2778 if ((i2c.len > sizeof (i2c.data)) || 2779 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2780 ret = EINVAL; 2781 break; 2782 } 2783 2784 p_ptt = ecore_ptt_acquire(p_hwfn); 2785 2786 if (!p_ptt) { 2787 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2788 ret = -1; 2789 break; 2790 } 2791 2792 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2793 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2794 i2c.len, &i2c.data[0]); 2795 2796 ecore_ptt_release(p_hwfn, p_ptt); 2797 2798 if (ret) { 2799 ret = -1; 2800 break; 2801 } 2802 2803 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2804 2805 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2806 len = %d addr = 0x%02x offset = 0x%04x \ 2807 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2808 0x%02x 0x%02x 0x%02x\n", 2809 ret, i2c.len, i2c.dev_addr, i2c.offset, 2810 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2811 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2812 break; 2813 } 2814 2815 default: 2816 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2817 ret = ether_ioctl(ifp, cmd, data); 2818 break; 2819 } 2820 2821 return (ret); 2822 } 2823 2824 static int 2825 qlnx_media_change(if_t ifp) 2826 { 2827 qlnx_host_t *ha; 2828 struct ifmedia *ifm; 2829 int ret = 0; 2830 2831 ha = (qlnx_host_t *)if_getsoftc(ifp); 2832 2833 QL_DPRINT2(ha, "enter\n"); 2834 2835 ifm = &ha->media; 2836 2837 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2838 ret = EINVAL; 2839 2840 QL_DPRINT2(ha, "exit\n"); 2841 2842 return (ret); 2843 } 2844 2845 static void 2846 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr) 2847 { 2848 qlnx_host_t *ha; 2849 2850 ha = (qlnx_host_t *)if_getsoftc(ifp); 2851 2852 QL_DPRINT2(ha, "enter\n"); 2853 2854 ifmr->ifm_status = IFM_AVALID; 2855 ifmr->ifm_active = IFM_ETHER; 2856 2857 if (ha->link_up) { 2858 ifmr->ifm_status |= IFM_ACTIVE; 2859 ifmr->ifm_active |= 2860 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2861 2862 if (ha->if_link.link_partner_caps & 2863 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2864 ifmr->ifm_active |= 2865 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2866 } 2867 2868 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2869 2870 return; 2871 } 2872 2873 static void 2874 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2875 struct qlnx_tx_queue *txq) 2876 { 2877 u16 idx; 2878 struct mbuf *mp; 2879 bus_dmamap_t map; 2880 int i; 2881 // struct eth_tx_bd *tx_data_bd; 2882 struct eth_tx_1st_bd *first_bd; 2883 int nbds = 0; 2884 2885 idx = txq->sw_tx_cons; 2886 mp = txq->sw_tx_ring[idx].mp; 2887 map = txq->sw_tx_ring[idx].map; 2888 2889 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2890 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2891 2892 QL_DPRINT1(ha, "(mp == NULL) " 2893 " tx_idx = 0x%x" 2894 " ecore_prod_idx = 0x%x" 2895 " ecore_cons_idx = 0x%x" 2896 " hw_bd_cons = 0x%x" 2897 " txq_db_last = 0x%x" 2898 " elem_left = 0x%x\n", 2899 fp->rss_id, 2900 ecore_chain_get_prod_idx(&txq->tx_pbl), 2901 ecore_chain_get_cons_idx(&txq->tx_pbl), 2902 le16toh(*txq->hw_cons_ptr), 2903 txq->tx_db.raw, 2904 ecore_chain_get_elem_left(&txq->tx_pbl)); 2905 2906 fp->err_tx_free_pkt_null++; 2907 2908 //DEBUG 2909 qlnx_trigger_dump(ha); 2910 2911 return; 2912 } else { 2913 QLNX_INC_OPACKETS((ha->ifp)); 2914 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2915 2916 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2917 bus_dmamap_unload(ha->tx_tag, map); 2918 2919 fp->tx_pkts_freed++; 2920 fp->tx_pkts_completed++; 2921 2922 m_freem(mp); 2923 } 2924 2925 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2926 nbds = first_bd->data.nbds; 2927 2928 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2929 2930 for (i = 1; i < nbds; i++) { 2931 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl); 2932 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2933 } 2934 txq->sw_tx_ring[idx].flags = 0; 2935 txq->sw_tx_ring[idx].mp = NULL; 2936 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2937 2938 return; 2939 } 2940 2941 static void 2942 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2943 struct qlnx_tx_queue *txq) 2944 { 2945 u16 hw_bd_cons; 2946 u16 ecore_cons_idx; 2947 uint16_t diff; 2948 uint16_t idx, idx2; 2949 2950 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2951 2952 while (hw_bd_cons != 2953 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2954 diff = hw_bd_cons - ecore_cons_idx; 2955 if ((diff > TX_RING_SIZE) || 2956 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2957 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2958 2959 QL_DPRINT1(ha, "(diff = 0x%x) " 2960 " tx_idx = 0x%x" 2961 " ecore_prod_idx = 0x%x" 2962 " ecore_cons_idx = 0x%x" 2963 " hw_bd_cons = 0x%x" 2964 " txq_db_last = 0x%x" 2965 " elem_left = 0x%x\n", 2966 diff, 2967 fp->rss_id, 2968 ecore_chain_get_prod_idx(&txq->tx_pbl), 2969 ecore_chain_get_cons_idx(&txq->tx_pbl), 2970 le16toh(*txq->hw_cons_ptr), 2971 txq->tx_db.raw, 2972 ecore_chain_get_elem_left(&txq->tx_pbl)); 2973 2974 fp->err_tx_cons_idx_conflict++; 2975 2976 //DEBUG 2977 qlnx_trigger_dump(ha); 2978 } 2979 2980 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2981 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 2982 prefetch(txq->sw_tx_ring[idx].mp); 2983 prefetch(txq->sw_tx_ring[idx2].mp); 2984 2985 qlnx_free_tx_pkt(ha, fp, txq); 2986 2987 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2988 } 2989 return; 2990 } 2991 2992 static int 2993 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp) 2994 { 2995 int ret = 0; 2996 struct qlnx_tx_queue *txq; 2997 qlnx_host_t * ha; 2998 uint16_t elem_left; 2999 3000 txq = fp->txq[0]; 3001 ha = (qlnx_host_t *)fp->edev; 3002 3003 if 
((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3004 if(mp != NULL) 3005 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3006 return (ret); 3007 } 3008 3009 if(mp != NULL) 3010 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3011 3012 mp = drbr_peek(ifp, fp->tx_br); 3013 3014 while (mp != NULL) { 3015 if (qlnx_send(ha, fp, &mp)) { 3016 if (mp != NULL) { 3017 drbr_putback(ifp, fp->tx_br, mp); 3018 } else { 3019 fp->tx_pkts_processed++; 3020 drbr_advance(ifp, fp->tx_br); 3021 } 3022 goto qlnx_transmit_locked_exit; 3023 3024 } else { 3025 drbr_advance(ifp, fp->tx_br); 3026 fp->tx_pkts_transmitted++; 3027 fp->tx_pkts_processed++; 3028 } 3029 3030 mp = drbr_peek(ifp, fp->tx_br); 3031 } 3032 3033 qlnx_transmit_locked_exit: 3034 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3035 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3036 < QLNX_TX_ELEM_MAX_THRESH)) 3037 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3038 3039 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3040 return ret; 3041 } 3042 3043 static int 3044 qlnx_transmit(if_t ifp, struct mbuf *mp) 3045 { 3046 qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp); 3047 struct qlnx_fastpath *fp; 3048 int rss_id = 0, ret = 0; 3049 3050 #ifdef QLNX_TRACEPERF_DATA 3051 uint64_t tx_pkts = 0, tx_compl = 0; 3052 #endif 3053 3054 QL_DPRINT2(ha, "enter\n"); 3055 3056 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3057 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3058 ha->num_rss; 3059 3060 fp = &ha->fp_array[rss_id]; 3061 3062 if (fp->tx_br == NULL) { 3063 ret = EINVAL; 3064 goto qlnx_transmit_exit; 3065 } 3066 3067 if (mtx_trylock(&fp->tx_mtx)) { 3068 #ifdef QLNX_TRACEPERF_DATA 3069 tx_pkts = fp->tx_pkts_transmitted; 3070 tx_compl = fp->tx_pkts_completed; 3071 #endif 3072 3073 ret = qlnx_transmit_locked(ifp, fp, mp); 3074 3075 #ifdef QLNX_TRACEPERF_DATA 3076 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3077 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3078 #endif 3079 mtx_unlock(&fp->tx_mtx); 3080 } else { 3081 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3082 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3083 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3084 } 3085 } 3086 3087 qlnx_transmit_exit: 3088 3089 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3090 return ret; 3091 } 3092 3093 static void 3094 qlnx_qflush(if_t ifp) 3095 { 3096 int rss_id; 3097 struct qlnx_fastpath *fp; 3098 struct mbuf *mp; 3099 qlnx_host_t *ha; 3100 3101 ha = (qlnx_host_t *)if_getsoftc(ifp); 3102 3103 QL_DPRINT2(ha, "enter\n"); 3104 3105 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3106 fp = &ha->fp_array[rss_id]; 3107 3108 if (fp == NULL) 3109 continue; 3110 3111 if (fp->tx_br) { 3112 mtx_lock(&fp->tx_mtx); 3113 3114 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3115 fp->tx_pkts_freed++; 3116 m_freem(mp); 3117 } 3118 mtx_unlock(&fp->tx_mtx); 3119 } 3120 } 3121 QL_DPRINT2(ha, "exit\n"); 3122 3123 return; 3124 } 3125 3126 static void 3127 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3128 { 3129 uint32_t offset; 3130 3131 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3132 3133 bus_write_4(ha->pci_dbells, offset, value); 3134 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3135 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3136 3137 return; 3138 } 3139 3140 static uint32_t 3141 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3142 { 3143 struct ether_vlan_header *eh = NULL; 3144 struct ip *ip = NULL; 3145 struct ip6_hdr 
*ip6 = NULL; 3146 struct tcphdr *th = NULL; 3147 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3148 uint16_t etype = 0; 3149 uint8_t buf[sizeof(struct ip6_hdr)]; 3150 3151 eh = mtod(mp, struct ether_vlan_header *); 3152 3153 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3154 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3155 etype = ntohs(eh->evl_proto); 3156 } else { 3157 ehdrlen = ETHER_HDR_LEN; 3158 etype = ntohs(eh->evl_encap_proto); 3159 } 3160 3161 switch (etype) { 3162 case ETHERTYPE_IP: 3163 ip = (struct ip *)(mp->m_data + ehdrlen); 3164 3165 ip_hlen = sizeof (struct ip); 3166 3167 if (mp->m_len < (ehdrlen + ip_hlen)) { 3168 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3169 ip = (struct ip *)buf; 3170 } 3171 3172 th = (struct tcphdr *)(ip + 1); 3173 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3174 break; 3175 3176 case ETHERTYPE_IPV6: 3177 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3178 3179 ip_hlen = sizeof(struct ip6_hdr); 3180 3181 if (mp->m_len < (ehdrlen + ip_hlen)) { 3182 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3183 buf); 3184 ip6 = (struct ip6_hdr *)buf; 3185 } 3186 th = (struct tcphdr *)(ip6 + 1); 3187 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3188 break; 3189 3190 default: 3191 break; 3192 } 3193 3194 return (offset); 3195 } 3196 3197 static __inline int 3198 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3199 uint32_t offset) 3200 { 3201 int i; 3202 uint32_t sum, nbds_in_hdr = 1; 3203 uint32_t window; 3204 bus_dma_segment_t *s_seg; 3205 3206 /* If the header spans multiple segments, skip those segments */ 3207 3208 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3209 return (0); 3210 3211 i = 0; 3212 3213 while ((i < nsegs) && (offset >= segs->ds_len)) { 3214 offset = offset - segs->ds_len; 3215 segs++; 3216 i++; 3217 nbds_in_hdr++; 3218 } 3219 3220 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3221 3222 nsegs = nsegs - i; 3223 3224 while (nsegs >= window) { 3225 sum = 0; 3226 s_seg = segs; 3227 3228 for (i = 0; i < window; i++){ 3229 sum += s_seg->ds_len; 3230 s_seg++; 3231 } 3232 3233 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3234 fp->tx_lso_wnd_min_len++; 3235 return (-1); 3236 } 3237 3238 nsegs = nsegs - 1; 3239 segs++; 3240 } 3241 3242 return (0); 3243 } 3244 3245 static int 3246 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3247 { 3248 bus_dma_segment_t *segs; 3249 bus_dmamap_t map = 0; 3250 uint32_t nsegs = 0; 3251 int ret = -1; 3252 struct mbuf *m_head = *m_headp; 3253 uint16_t idx = 0; 3254 uint16_t elem_left; 3255 3256 uint8_t nbd = 0; 3257 struct qlnx_tx_queue *txq; 3258 3259 struct eth_tx_1st_bd *first_bd; 3260 struct eth_tx_2nd_bd *second_bd; 3261 struct eth_tx_3rd_bd *third_bd; 3262 struct eth_tx_bd *tx_data_bd; 3263 3264 int seg_idx = 0; 3265 uint32_t nbds_in_hdr = 0; 3266 uint32_t offset = 0; 3267 3268 #ifdef QLNX_TRACE_PERF_DATA 3269 uint16_t bd_used; 3270 #endif 3271 3272 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3273 3274 if (!ha->link_up) 3275 return (-1); 3276 3277 first_bd = NULL; 3278 second_bd = NULL; 3279 third_bd = NULL; 3280 tx_data_bd = NULL; 3281 3282 txq = fp->txq[0]; 3283 3284 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3285 QLNX_TX_ELEM_MIN_THRESH) { 3286 fp->tx_nsegs_gt_elem_left++; 3287 fp->err_tx_nsegs_gt_elem_left++; 3288 3289 return (ENOBUFS); 3290 } 3291 3292 idx = txq->sw_tx_prod; 3293 3294 map = txq->sw_tx_ring[idx].map; 3295 segs = txq->segs; 3296 3297 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 
3298 BUS_DMA_NOWAIT); 3299 3300 if (ha->dbg_trace_tso_pkt_len) { 3301 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3302 if (!fp->tx_tso_min_pkt_len) { 3303 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3304 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3305 } else { 3306 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3307 fp->tx_tso_min_pkt_len = 3308 m_head->m_pkthdr.len; 3309 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3310 fp->tx_tso_max_pkt_len = 3311 m_head->m_pkthdr.len; 3312 } 3313 } 3314 } 3315 3316 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3317 offset = qlnx_tcp_offset(ha, m_head); 3318 3319 if ((ret == EFBIG) || 3320 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3321 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3322 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3323 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3324 struct mbuf *m; 3325 3326 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3327 3328 fp->tx_defrag++; 3329 3330 m = m_defrag(m_head, M_NOWAIT); 3331 if (m == NULL) { 3332 fp->err_tx_defrag++; 3333 fp->tx_pkts_freed++; 3334 m_freem(m_head); 3335 *m_headp = NULL; 3336 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3337 return (ENOBUFS); 3338 } 3339 3340 m_head = m; 3341 *m_headp = m_head; 3342 3343 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3344 segs, &nsegs, BUS_DMA_NOWAIT))) { 3345 fp->err_tx_defrag_dmamap_load++; 3346 3347 QL_DPRINT1(ha, 3348 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3349 ret, m_head->m_pkthdr.len); 3350 3351 fp->tx_pkts_freed++; 3352 m_freem(m_head); 3353 *m_headp = NULL; 3354 3355 return (ret); 3356 } 3357 3358 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3359 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3360 fp->err_tx_non_tso_max_seg++; 3361 3362 QL_DPRINT1(ha, 3363 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3364 ret, nsegs, m_head->m_pkthdr.len); 3365 3366 fp->tx_pkts_freed++; 3367 m_freem(m_head); 3368 *m_headp = NULL; 3369 3370 return (ret); 3371 } 3372 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3373 offset = qlnx_tcp_offset(ha, m_head); 3374 3375 } else if (ret) { 3376 fp->err_tx_dmamap_load++; 3377 3378 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3379 ret, m_head->m_pkthdr.len); 3380 fp->tx_pkts_freed++; 3381 m_freem(m_head); 3382 *m_headp = NULL; 3383 return (ret); 3384 } 3385 3386 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3387 3388 if (ha->dbg_trace_tso_pkt_len) { 3389 if (nsegs < QLNX_FP_MAX_SEGS) 3390 fp->tx_pkts[(nsegs - 1)]++; 3391 else 3392 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3393 } 3394 3395 #ifdef QLNX_TRACE_PERF_DATA 3396 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3397 if(m_head->m_pkthdr.len <= 2048) 3398 fp->tx_pkts_hist[0]++; 3399 else if((m_head->m_pkthdr.len > 2048) && 3400 (m_head->m_pkthdr.len <= 4096)) 3401 fp->tx_pkts_hist[1]++; 3402 else if((m_head->m_pkthdr.len > 4096) && 3403 (m_head->m_pkthdr.len <= 8192)) 3404 fp->tx_pkts_hist[2]++; 3405 else if((m_head->m_pkthdr.len > 8192) && 3406 (m_head->m_pkthdr.len <= 12288 )) 3407 fp->tx_pkts_hist[3]++; 3408 else if((m_head->m_pkthdr.len > 11288) && 3409 (m_head->m_pkthdr.len <= 16394)) 3410 fp->tx_pkts_hist[4]++; 3411 else if((m_head->m_pkthdr.len > 16384) && 3412 (m_head->m_pkthdr.len <= 20480)) 3413 fp->tx_pkts_hist[5]++; 3414 else if((m_head->m_pkthdr.len > 20480) && 3415 (m_head->m_pkthdr.len <= 24576)) 3416 fp->tx_pkts_hist[6]++; 3417 else if((m_head->m_pkthdr.len > 24576) && 3418 (m_head->m_pkthdr.len <= 28672)) 3419 fp->tx_pkts_hist[7]++; 3420 else if((m_head->m_pkthdr.len > 28762) && 3421 
(m_head->m_pkthdr.len <= 32768)) 3422 fp->tx_pkts_hist[8]++; 3423 else if((m_head->m_pkthdr.len > 32768) && 3424 (m_head->m_pkthdr.len <= 36864)) 3425 fp->tx_pkts_hist[9]++; 3426 else if((m_head->m_pkthdr.len > 36864) && 3427 (m_head->m_pkthdr.len <= 40960)) 3428 fp->tx_pkts_hist[10]++; 3429 else if((m_head->m_pkthdr.len > 40960) && 3430 (m_head->m_pkthdr.len <= 45056)) 3431 fp->tx_pkts_hist[11]++; 3432 else if((m_head->m_pkthdr.len > 45056) && 3433 (m_head->m_pkthdr.len <= 49152)) 3434 fp->tx_pkts_hist[12]++; 3435 else if((m_head->m_pkthdr.len > 49512) && 3436 (m_head->m_pkthdr.len <= 53248)) 3437 fp->tx_pkts_hist[13]++; 3438 else if((m_head->m_pkthdr.len > 53248) && 3439 (m_head->m_pkthdr.len <= 57344)) 3440 fp->tx_pkts_hist[14]++; 3441 else if((m_head->m_pkthdr.len > 53248) && 3442 (m_head->m_pkthdr.len <= 57344)) 3443 fp->tx_pkts_hist[15]++; 3444 else if((m_head->m_pkthdr.len > 57344) && 3445 (m_head->m_pkthdr.len <= 61440)) 3446 fp->tx_pkts_hist[16]++; 3447 else 3448 fp->tx_pkts_hist[17]++; 3449 } 3450 3451 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3452 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3453 bd_used = TX_RING_SIZE - elem_left; 3454 3455 if(bd_used <= 100) 3456 fp->tx_pkts_q[0]++; 3457 else if((bd_used > 100) && (bd_used <= 500)) 3458 fp->tx_pkts_q[1]++; 3459 else if((bd_used > 500) && (bd_used <= 1000)) 3460 fp->tx_pkts_q[2]++; 3461 else if((bd_used > 1000) && (bd_used <= 2000)) 3462 fp->tx_pkts_q[3]++; 3463 else if((bd_used > 3000) && (bd_used <= 4000)) 3464 fp->tx_pkts_q[4]++; 3465 else if((bd_used > 4000) && (bd_used <= 5000)) 3466 fp->tx_pkts_q[5]++; 3467 else if((bd_used > 6000) && (bd_used <= 7000)) 3468 fp->tx_pkts_q[6]++; 3469 else if((bd_used > 7000) && (bd_used <= 8000)) 3470 fp->tx_pkts_q[7]++; 3471 else if((bd_used > 8000) && (bd_used <= 9000)) 3472 fp->tx_pkts_q[8]++; 3473 else if((bd_used > 9000) && (bd_used <= 10000)) 3474 fp->tx_pkts_q[9]++; 3475 else if((bd_used > 10000) && (bd_used <= 11000)) 3476 fp->tx_pkts_q[10]++; 3477 else if((bd_used > 11000) && (bd_used <= 12000)) 3478 fp->tx_pkts_q[11]++; 3479 else if((bd_used > 12000) && (bd_used <= 13000)) 3480 fp->tx_pkts_q[12]++; 3481 else if((bd_used > 13000) && (bd_used <= 14000)) 3482 fp->tx_pkts_q[13]++; 3483 else if((bd_used > 14000) && (bd_used <= 15000)) 3484 fp->tx_pkts_q[14]++; 3485 else if((bd_used > 15000) && (bd_used <= 16000)) 3486 fp->tx_pkts_q[15]++; 3487 else 3488 fp->tx_pkts_q[16]++; 3489 } 3490 3491 #endif /* end of QLNX_TRACE_PERF_DATA */ 3492 3493 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3494 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3495 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs" 3496 " in chain[%d] trying to free packets\n", 3497 nsegs, elem_left, fp->rss_id); 3498 3499 fp->tx_nsegs_gt_elem_left++; 3500 3501 (void)qlnx_tx_int(ha, fp, txq); 3502 3503 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3504 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3505 QL_DPRINT1(ha, 3506 "(%d, 0x%x) insufficient BDs in chain[%d]\n", 3507 nsegs, elem_left, fp->rss_id); 3508 3509 fp->err_tx_nsegs_gt_elem_left++; 3510 fp->tx_ring_full = 1; 3511 if (ha->storm_stats_enable) 3512 ha->storm_stats_gather = 1; 3513 return (ENOBUFS); 3514 } 3515 } 3516 3517 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3518 3519 txq->sw_tx_ring[idx].mp = m_head; 3520 3521 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3522 3523 memset(first_bd, 0, sizeof(*first_bd)); 3524 3525 first_bd->data.bd_flags.bitfields = 3526 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3527 3528
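	/*
	 * BD layout built below: for a TSO frame the first BD covers at most
	 * the L2/L3/L4 header bytes ("offset" computed by qlnx_tcp_offset()),
	 * a second and a third BD are always present, and the third BD carries
	 * the LSO MSS plus the number of header BDs.  For a non-TSO frame the
	 * first BD carries the packet length and each remaining DMA segment
	 * maps to one data BD.  The total BD count is stored in
	 * first_bd->data.nbds before the doorbell is written.
	 */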
BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3529 3530 nbd++; 3531 3532 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3533 first_bd->data.bd_flags.bitfields |= 3534 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3535 } 3536 3537 if (m_head->m_pkthdr.csum_flags & 3538 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3539 first_bd->data.bd_flags.bitfields |= 3540 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3541 } 3542 3543 if (m_head->m_flags & M_VLANTAG) { 3544 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3545 first_bd->data.bd_flags.bitfields |= 3546 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3547 } 3548 3549 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3550 first_bd->data.bd_flags.bitfields |= 3551 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3552 first_bd->data.bd_flags.bitfields |= 3553 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3554 3555 nbds_in_hdr = 1; 3556 3557 if (offset == segs->ds_len) { 3558 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3559 segs++; 3560 seg_idx++; 3561 3562 second_bd = (struct eth_tx_2nd_bd *) 3563 ecore_chain_produce(&txq->tx_pbl); 3564 memset(second_bd, 0, sizeof(*second_bd)); 3565 nbd++; 3566 3567 if (seg_idx < nsegs) { 3568 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3569 (segs->ds_addr), (segs->ds_len)); 3570 segs++; 3571 seg_idx++; 3572 } 3573 3574 third_bd = (struct eth_tx_3rd_bd *) 3575 ecore_chain_produce(&txq->tx_pbl); 3576 memset(third_bd, 0, sizeof(*third_bd)); 3577 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3578 third_bd->data.bitfields |= 3579 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3580 nbd++; 3581 3582 if (seg_idx < nsegs) { 3583 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3584 (segs->ds_addr), (segs->ds_len)); 3585 segs++; 3586 seg_idx++; 3587 } 3588 3589 for (; seg_idx < nsegs; seg_idx++) { 3590 tx_data_bd = (struct eth_tx_bd *) 3591 ecore_chain_produce(&txq->tx_pbl); 3592 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3593 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3594 segs->ds_addr,\ 3595 segs->ds_len); 3596 segs++; 3597 nbd++; 3598 } 3599 3600 } else if (offset < segs->ds_len) { 3601 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3602 3603 second_bd = (struct eth_tx_2nd_bd *) 3604 ecore_chain_produce(&txq->tx_pbl); 3605 memset(second_bd, 0, sizeof(*second_bd)); 3606 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3607 (segs->ds_addr + offset),\ 3608 (segs->ds_len - offset)); 3609 nbd++; 3610 segs++; 3611 3612 third_bd = (struct eth_tx_3rd_bd *) 3613 ecore_chain_produce(&txq->tx_pbl); 3614 memset(third_bd, 0, sizeof(*third_bd)); 3615 3616 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3617 segs->ds_addr,\ 3618 segs->ds_len); 3619 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3620 third_bd->data.bitfields |= 3621 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3622 segs++; 3623 nbd++; 3624 3625 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3626 tx_data_bd = (struct eth_tx_bd *) 3627 ecore_chain_produce(&txq->tx_pbl); 3628 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3629 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3630 segs->ds_addr,\ 3631 segs->ds_len); 3632 segs++; 3633 nbd++; 3634 } 3635 3636 } else { 3637 offset = offset - segs->ds_len; 3638 segs++; 3639 3640 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3641 if (offset) 3642 nbds_in_hdr++; 3643 3644 tx_data_bd = (struct eth_tx_bd *) 3645 ecore_chain_produce(&txq->tx_pbl); 3646 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3647 3648 if (second_bd == NULL) { 3649 second_bd = (struct eth_tx_2nd_bd *) 3650 tx_data_bd; 3651 } else if (third_bd == NULL) { 3652 third_bd = (struct 
eth_tx_3rd_bd *) 3653 tx_data_bd; 3654 } 3655 3656 if (offset && (offset < segs->ds_len)) { 3657 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3658 segs->ds_addr, offset); 3659 3660 tx_data_bd = (struct eth_tx_bd *) 3661 ecore_chain_produce(&txq->tx_pbl); 3662 3663 memset(tx_data_bd, 0, 3664 sizeof(*tx_data_bd)); 3665 3666 if (second_bd == NULL) { 3667 second_bd = 3668 (struct eth_tx_2nd_bd *)tx_data_bd; 3669 } else if (third_bd == NULL) { 3670 third_bd = 3671 (struct eth_tx_3rd_bd *)tx_data_bd; 3672 } 3673 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3674 (segs->ds_addr + offset), \ 3675 (segs->ds_len - offset)); 3676 nbd++; 3677 offset = 0; 3678 } else { 3679 if (offset) 3680 offset = offset - segs->ds_len; 3681 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3682 segs->ds_addr, segs->ds_len); 3683 } 3684 segs++; 3685 nbd++; 3686 } 3687 3688 if (third_bd == NULL) { 3689 third_bd = (struct eth_tx_3rd_bd *) 3690 ecore_chain_produce(&txq->tx_pbl); 3691 memset(third_bd, 0, sizeof(*third_bd)); 3692 } 3693 3694 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3695 third_bd->data.bitfields |= 3696 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3697 } 3698 fp->tx_tso_pkts++; 3699 } else { 3700 segs++; 3701 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3702 tx_data_bd = (struct eth_tx_bd *) 3703 ecore_chain_produce(&txq->tx_pbl); 3704 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3705 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3706 segs->ds_len); 3707 segs++; 3708 nbd++; 3709 } 3710 first_bd->data.bitfields = 3711 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3712 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3713 first_bd->data.bitfields = 3714 htole16(first_bd->data.bitfields); 3715 fp->tx_non_tso_pkts++; 3716 } 3717 3718 first_bd->data.nbds = nbd; 3719 3720 if (ha->dbg_trace_tso_pkt_len) { 3721 if (fp->tx_tso_max_nsegs < nsegs) 3722 fp->tx_tso_max_nsegs = nsegs; 3723 3724 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3725 fp->tx_tso_min_nsegs = nsegs; 3726 } 3727 3728 txq->sw_tx_ring[idx].nsegs = nsegs; 3729 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3730 3731 txq->tx_db.data.bd_prod = 3732 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3733 3734 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3735 3736 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3737 return (0); 3738 } 3739 3740 static void 3741 qlnx_stop(qlnx_host_t *ha) 3742 { 3743 if_t ifp = ha->ifp; 3744 int i; 3745 3746 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 3747 3748 /* 3749 * We simply lock and unlock each fp->tx_mtx to 3750 * propagate the if_drv_flags 3751 * state to each tx thread 3752 */ 3753 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3754 3755 if (ha->state == QLNX_STATE_OPEN) { 3756 for (i = 0; i < ha->num_rss; i++) { 3757 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3758 3759 mtx_lock(&fp->tx_mtx); 3760 mtx_unlock(&fp->tx_mtx); 3761 3762 if (fp->fp_taskqueue != NULL) 3763 taskqueue_enqueue(fp->fp_taskqueue, 3764 &fp->fp_task); 3765 } 3766 } 3767 #ifdef QLNX_ENABLE_IWARP 3768 if (qlnx_vf_device(ha) != 0) { 3769 qlnx_rdma_dev_close(ha); 3770 } 3771 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3772 3773 qlnx_unload(ha); 3774 3775 return; 3776 } 3777 3778 static int 3779 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3780 { 3781 return(TX_RING_SIZE - 1); 3782 } 3783 3784 uint8_t * 3785 qlnx_get_mac_addr(qlnx_host_t *ha) 3786 { 3787 struct ecore_hwfn *p_hwfn; 3788 unsigned char mac[ETHER_ADDR_LEN]; 3789 uint8_t p_is_forced; 3790 3791 p_hwfn = &ha->cdev.hwfns[0]; 3792 3793 if 
(qlnx_vf_device(ha) != 0) 3794 return (p_hwfn->hw_info.hw_mac_addr); 3795 3796 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3797 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3798 true) { 3799 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3800 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3801 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3802 memcpy(ha->primary_mac, mac, ETH_ALEN); 3803 } 3804 3805 return (ha->primary_mac); 3806 } 3807 3808 static uint32_t 3809 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3810 { 3811 uint32_t ifm_type = 0; 3812 3813 switch (if_link->media_type) { 3814 case MEDIA_MODULE_FIBER: 3815 case MEDIA_UNSPECIFIED: 3816 if (if_link->speed == (100 * 1000)) 3817 ifm_type = QLNX_IFM_100G_SR4; 3818 else if (if_link->speed == (40 * 1000)) 3819 ifm_type = IFM_40G_SR4; 3820 else if (if_link->speed == (25 * 1000)) 3821 ifm_type = QLNX_IFM_25G_SR; 3822 else if (if_link->speed == (10 * 1000)) 3823 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3824 else if (if_link->speed == (1 * 1000)) 3825 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3826 3827 break; 3828 3829 case MEDIA_DA_TWINAX: 3830 if (if_link->speed == (100 * 1000)) 3831 ifm_type = QLNX_IFM_100G_CR4; 3832 else if (if_link->speed == (40 * 1000)) 3833 ifm_type = IFM_40G_CR4; 3834 else if (if_link->speed == (25 * 1000)) 3835 ifm_type = QLNX_IFM_25G_CR; 3836 else if (if_link->speed == (10 * 1000)) 3837 ifm_type = IFM_10G_TWINAX; 3838 3839 break; 3840 3841 default : 3842 ifm_type = IFM_UNKNOWN; 3843 break; 3844 } 3845 return (ifm_type); 3846 } 3847 3848 /***************************************************************************** 3849 * Interrupt Service Functions 3850 *****************************************************************************/ 3851 3852 static int 3853 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3854 struct mbuf *mp_head, uint16_t len) 3855 { 3856 struct mbuf *mp, *mpf, *mpl; 3857 struct sw_rx_data *sw_rx_data; 3858 struct qlnx_rx_queue *rxq; 3859 uint16_t len_in_buffer; 3860 3861 rxq = fp->rxq; 3862 mpf = mpl = mp = NULL; 3863 3864 while (len) { 3865 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3866 3867 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3868 mp = sw_rx_data->data; 3869 3870 if (mp == NULL) { 3871 QL_DPRINT1(ha, "mp = NULL\n"); 3872 fp->err_rx_mp_null++; 3873 rxq->sw_rx_cons = 3874 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3875 3876 if (mpf != NULL) 3877 m_freem(mpf); 3878 3879 return (-1); 3880 } 3881 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3882 BUS_DMASYNC_POSTREAD); 3883 3884 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3885 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3886 " incoming packet and reusing its buffer\n"); 3887 3888 qlnx_reuse_rx_data(rxq); 3889 fp->err_rx_alloc_errors++; 3890 3891 if (mpf != NULL) 3892 m_freem(mpf); 3893 3894 return (-1); 3895 } 3896 ecore_chain_consume(&rxq->rx_bd_ring); 3897 3898 if (len > rxq->rx_buf_size) 3899 len_in_buffer = rxq->rx_buf_size; 3900 else 3901 len_in_buffer = len; 3902 3903 len = len - len_in_buffer; 3904 3905 mp->m_flags &= ~M_PKTHDR; 3906 mp->m_next = NULL; 3907 mp->m_len = len_in_buffer; 3908 3909 if (mpf == NULL) 3910 mpf = mpl = mp; 3911 else { 3912 mpl->m_next = mp; 3913 mpl = mp; 3914 } 3915 } 3916 3917 if (mpf != NULL) 3918 mp_head->m_next = mpf; 3919 3920 return (0); 3921 } 3922 3923 static void 3924 qlnx_tpa_start(qlnx_host_t *ha, 3925 struct qlnx_fastpath *fp, 3926 struct qlnx_rx_queue *rxq, 3927 struct 
eth_fast_path_rx_tpa_start_cqe *cqe) 3928 { 3929 uint32_t agg_index; 3930 if_t ifp = ha->ifp; 3931 struct mbuf *mp; 3932 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3933 struct sw_rx_data *sw_rx_data; 3934 dma_addr_t addr; 3935 bus_dmamap_t map; 3936 struct eth_rx_bd *rx_bd; 3937 int i; 3938 uint8_t hash_type; 3939 3940 agg_index = cqe->tpa_agg_index; 3941 3942 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3943 \t type = 0x%x\n \ 3944 \t bitfields = 0x%x\n \ 3945 \t seg_len = 0x%x\n \ 3946 \t pars_flags = 0x%x\n \ 3947 \t vlan_tag = 0x%x\n \ 3948 \t rss_hash = 0x%x\n \ 3949 \t len_on_first_bd = 0x%x\n \ 3950 \t placement_offset = 0x%x\n \ 3951 \t tpa_agg_index = 0x%x\n \ 3952 \t header_len = 0x%x\n \ 3953 \t ext_bd_len_list[0] = 0x%x\n \ 3954 \t ext_bd_len_list[1] = 0x%x\n \ 3955 \t ext_bd_len_list[2] = 0x%x\n \ 3956 \t ext_bd_len_list[3] = 0x%x\n \ 3957 \t ext_bd_len_list[4] = 0x%x\n", 3958 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3959 cqe->pars_flags.flags, cqe->vlan_tag, 3960 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3961 cqe->tpa_agg_index, cqe->header_len, 3962 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3963 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3964 cqe->ext_bd_len_list[4]); 3965 3966 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3967 fp->err_rx_tpa_invalid_agg_num++; 3968 return; 3969 } 3970 3971 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3972 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3973 mp = sw_rx_data->data; 3974 3975 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 3976 3977 if (mp == NULL) { 3978 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 3979 fp->err_rx_mp_null++; 3980 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3981 3982 return; 3983 } 3984 3985 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3986 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 3987 " flags = %x, dropping incoming packet\n", fp->rss_id, 3988 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 3989 3990 fp->err_rx_hw_errors++; 3991 3992 qlnx_reuse_rx_data(rxq); 3993 3994 QLNX_INC_IERRORS(ifp); 3995 3996 return; 3997 } 3998 3999 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4000 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4001 " dropping incoming packet and reusing its buffer\n", 4002 fp->rss_id); 4003 4004 fp->err_rx_alloc_errors++; 4005 QLNX_INC_IQDROPS(ifp); 4006 4007 /* 4008 * Load the tpa mbuf into the rx ring and save the 4009 * posted mbuf 4010 */ 4011 4012 map = sw_rx_data->map; 4013 addr = sw_rx_data->dma_addr; 4014 4015 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4016 4017 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4018 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4019 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4020 4021 rxq->tpa_info[agg_index].rx_buf.data = mp; 4022 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4023 rxq->tpa_info[agg_index].rx_buf.map = map; 4024 4025 rx_bd = (struct eth_rx_bd *) 4026 ecore_chain_produce(&rxq->rx_bd_ring); 4027 4028 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4029 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4030 4031 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4032 BUS_DMASYNC_PREREAD); 4033 4034 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4035 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4036 4037 ecore_chain_consume(&rxq->rx_bd_ring); 4038 4039 /* Now reuse any buffers posted in ext_bd_len_list */ 4040 for (i = 0; i < 
ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4041 if (cqe->ext_bd_len_list[i] == 0) 4042 break; 4043 4044 qlnx_reuse_rx_data(rxq); 4045 } 4046 4047 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4048 return; 4049 } 4050 4051 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4052 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4053 " dropping incoming packet and reusing its buffer\n", 4054 fp->rss_id); 4055 4056 QLNX_INC_IQDROPS(ifp); 4057 4058 /* if we already have mbuf head in aggregation free it */ 4059 if (rxq->tpa_info[agg_index].mpf) { 4060 m_freem(rxq->tpa_info[agg_index].mpf); 4061 rxq->tpa_info[agg_index].mpl = NULL; 4062 } 4063 rxq->tpa_info[agg_index].mpf = mp; 4064 rxq->tpa_info[agg_index].mpl = NULL; 4065 4066 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4067 ecore_chain_consume(&rxq->rx_bd_ring); 4068 4069 /* Now reuse any buffers posted in ext_bd_len_list */ 4070 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4071 if (cqe->ext_bd_len_list[i] == 0) 4072 break; 4073 4074 qlnx_reuse_rx_data(rxq); 4075 } 4076 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4077 4078 return; 4079 } 4080 4081 /* 4082 * first process the ext_bd_len_list 4083 * if this fails then we simply drop the packet 4084 */ 4085 ecore_chain_consume(&rxq->rx_bd_ring); 4086 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4087 4088 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4089 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4090 4091 if (cqe->ext_bd_len_list[i] == 0) 4092 break; 4093 4094 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4095 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4096 BUS_DMASYNC_POSTREAD); 4097 4098 mpc = sw_rx_data->data; 4099 4100 if (mpc == NULL) { 4101 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4102 fp->err_rx_mp_null++; 4103 if (mpf != NULL) 4104 m_freem(mpf); 4105 mpf = mpl = NULL; 4106 rxq->tpa_info[agg_index].agg_state = 4107 QLNX_AGG_STATE_ERROR; 4108 ecore_chain_consume(&rxq->rx_bd_ring); 4109 rxq->sw_rx_cons = 4110 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4111 continue; 4112 } 4113 4114 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4115 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4116 " dropping incoming packet and reusing its" 4117 " buffer\n", fp->rss_id); 4118 4119 qlnx_reuse_rx_data(rxq); 4120 4121 if (mpf != NULL) 4122 m_freem(mpf); 4123 mpf = mpl = NULL; 4124 4125 rxq->tpa_info[agg_index].agg_state = 4126 QLNX_AGG_STATE_ERROR; 4127 4128 ecore_chain_consume(&rxq->rx_bd_ring); 4129 rxq->sw_rx_cons = 4130 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4131 4132 continue; 4133 } 4134 4135 mpc->m_flags &= ~M_PKTHDR; 4136 mpc->m_next = NULL; 4137 mpc->m_len = cqe->ext_bd_len_list[i]; 4138 4139 if (mpf == NULL) { 4140 mpf = mpl = mpc; 4141 } else { 4142 mpl->m_len = ha->rx_buf_size; 4143 mpl->m_next = mpc; 4144 mpl = mpc; 4145 } 4146 4147 ecore_chain_consume(&rxq->rx_bd_ring); 4148 rxq->sw_rx_cons = 4149 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4150 } 4151 4152 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4153 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4154 " incoming packet and reusing its buffer\n", 4155 fp->rss_id); 4156 4157 QLNX_INC_IQDROPS(ifp); 4158 4159 rxq->tpa_info[agg_index].mpf = mp; 4160 rxq->tpa_info[agg_index].mpl = NULL; 4161 4162 return; 4163 } 4164 4165 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 4166 4167 if (mpf != NULL) { 4168 mp->m_len = ha->rx_buf_size; 4169 mp->m_next = mpf; 4170 
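		/*
		 * Save the aggregation head (mp, with any ext_bd buffers
		 * already chained behind it) and its current tail in
		 * tpa_info[agg_index] so that later continuation completions
		 * handled by qlnx_tpa_cont() can keep appending buffers.
		 */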
rxq->tpa_info[agg_index].mpf = mp; 4171 rxq->tpa_info[agg_index].mpl = mpl; 4172 } else { 4173 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4174 rxq->tpa_info[agg_index].mpf = mp; 4175 rxq->tpa_info[agg_index].mpl = mp; 4176 mp->m_next = NULL; 4177 } 4178 4179 mp->m_flags |= M_PKTHDR; 4180 4181 /* assign packet to this interface interface */ 4182 mp->m_pkthdr.rcvif = ifp; 4183 4184 /* assume no hardware checksum has complated */ 4185 mp->m_pkthdr.csum_flags = 0; 4186 4187 //mp->m_pkthdr.flowid = fp->rss_id; 4188 mp->m_pkthdr.flowid = cqe->rss_hash; 4189 4190 hash_type = cqe->bitfields & 4191 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4192 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4193 4194 switch (hash_type) { 4195 case RSS_HASH_TYPE_IPV4: 4196 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4197 break; 4198 4199 case RSS_HASH_TYPE_TCP_IPV4: 4200 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4201 break; 4202 4203 case RSS_HASH_TYPE_IPV6: 4204 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4205 break; 4206 4207 case RSS_HASH_TYPE_TCP_IPV6: 4208 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4209 break; 4210 4211 default: 4212 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4213 break; 4214 } 4215 4216 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4217 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4218 4219 mp->m_pkthdr.csum_data = 0xFFFF; 4220 4221 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4222 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4223 mp->m_flags |= M_VLANTAG; 4224 } 4225 4226 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4227 4228 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4229 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4230 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4231 4232 return; 4233 } 4234 4235 static void 4236 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4237 struct qlnx_rx_queue *rxq, 4238 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4239 { 4240 struct sw_rx_data *sw_rx_data; 4241 int i; 4242 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4243 struct mbuf *mp; 4244 uint32_t agg_index; 4245 4246 QL_DPRINT7(ha, "[%d]: enter\n \ 4247 \t type = 0x%x\n \ 4248 \t tpa_agg_index = 0x%x\n \ 4249 \t len_list[0] = 0x%x\n \ 4250 \t len_list[1] = 0x%x\n \ 4251 \t len_list[2] = 0x%x\n \ 4252 \t len_list[3] = 0x%x\n \ 4253 \t len_list[4] = 0x%x\n \ 4254 \t len_list[5] = 0x%x\n", 4255 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4256 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4257 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4258 4259 agg_index = cqe->tpa_agg_index; 4260 4261 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4262 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4263 fp->err_rx_tpa_invalid_agg_num++; 4264 return; 4265 } 4266 4267 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4268 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4269 4270 if (cqe->len_list[i] == 0) 4271 break; 4272 4273 if (rxq->tpa_info[agg_index].agg_state != 4274 QLNX_AGG_STATE_START) { 4275 qlnx_reuse_rx_data(rxq); 4276 continue; 4277 } 4278 4279 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4280 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4281 BUS_DMASYNC_POSTREAD); 4282 4283 mpc = sw_rx_data->data; 4284 4285 if (mpc == NULL) { 4286 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4287 4288 fp->err_rx_mp_null++; 4289 if (mpf != NULL) 4290 m_freem(mpf); 4291 mpf = mpl = NULL; 4292 rxq->tpa_info[agg_index].agg_state = 4293 QLNX_AGG_STATE_ERROR; 4294 ecore_chain_consume(&rxq->rx_bd_ring); 4295 rxq->sw_rx_cons = 
4296 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4297 continue; 4298 } 4299 4300 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4301 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4302 " dropping incoming packet and reusing its" 4303 " buffer\n", fp->rss_id); 4304 4305 qlnx_reuse_rx_data(rxq); 4306 4307 if (mpf != NULL) 4308 m_freem(mpf); 4309 mpf = mpl = NULL; 4310 4311 rxq->tpa_info[agg_index].agg_state = 4312 QLNX_AGG_STATE_ERROR; 4313 4314 ecore_chain_consume(&rxq->rx_bd_ring); 4315 rxq->sw_rx_cons = 4316 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4317 4318 continue; 4319 } 4320 4321 mpc->m_flags &= ~M_PKTHDR; 4322 mpc->m_next = NULL; 4323 mpc->m_len = cqe->len_list[i]; 4324 4325 if (mpf == NULL) { 4326 mpf = mpl = mpc; 4327 } else { 4328 mpl->m_len = ha->rx_buf_size; 4329 mpl->m_next = mpc; 4330 mpl = mpc; 4331 } 4332 4333 ecore_chain_consume(&rxq->rx_bd_ring); 4334 rxq->sw_rx_cons = 4335 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4336 } 4337 4338 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4339 fp->rss_id, mpf, mpl); 4340 4341 if (mpf != NULL) { 4342 mp = rxq->tpa_info[agg_index].mpl; 4343 mp->m_len = ha->rx_buf_size; 4344 mp->m_next = mpf; 4345 rxq->tpa_info[agg_index].mpl = mpl; 4346 } 4347 4348 return; 4349 } 4350 4351 static int 4352 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4353 struct qlnx_rx_queue *rxq, 4354 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4355 { 4356 struct sw_rx_data *sw_rx_data; 4357 int i; 4358 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4359 struct mbuf *mp; 4360 uint32_t agg_index; 4361 uint32_t len = 0; 4362 if_t ifp = ha->ifp; 4363 4364 QL_DPRINT7(ha, "[%d]: enter\n \ 4365 \t type = 0x%x\n \ 4366 \t tpa_agg_index = 0x%x\n \ 4367 \t total_packet_len = 0x%x\n \ 4368 \t num_of_bds = 0x%x\n \ 4369 \t end_reason = 0x%x\n \ 4370 \t num_of_coalesced_segs = 0x%x\n \ 4371 \t ts_delta = 0x%x\n \ 4372 \t len_list[0] = 0x%x\n \ 4373 \t len_list[1] = 0x%x\n \ 4374 \t len_list[2] = 0x%x\n \ 4375 \t len_list[3] = 0x%x\n", 4376 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4377 cqe->total_packet_len, cqe->num_of_bds, 4378 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4379 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4380 cqe->len_list[3]); 4381 4382 agg_index = cqe->tpa_agg_index; 4383 4384 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4385 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4386 4387 fp->err_rx_tpa_invalid_agg_num++; 4388 return (0); 4389 } 4390 4391 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4392 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4393 4394 if (cqe->len_list[i] == 0) 4395 break; 4396 4397 if (rxq->tpa_info[agg_index].agg_state != 4398 QLNX_AGG_STATE_START) { 4399 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4400 4401 qlnx_reuse_rx_data(rxq); 4402 continue; 4403 } 4404 4405 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4406 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4407 BUS_DMASYNC_POSTREAD); 4408 4409 mpc = sw_rx_data->data; 4410 4411 if (mpc == NULL) { 4412 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4413 4414 fp->err_rx_mp_null++; 4415 if (mpf != NULL) 4416 m_freem(mpf); 4417 mpf = mpl = NULL; 4418 rxq->tpa_info[agg_index].agg_state = 4419 QLNX_AGG_STATE_ERROR; 4420 ecore_chain_consume(&rxq->rx_bd_ring); 4421 rxq->sw_rx_cons = 4422 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4423 continue; 4424 } 4425 4426 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4427 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4428 " dropping incoming packet and reusing its" 4429 " buffer\n", fp->rss_id); 4430 4431 
qlnx_reuse_rx_data(rxq); 4432 4433 if (mpf != NULL) 4434 m_freem(mpf); 4435 mpf = mpl = NULL; 4436 4437 rxq->tpa_info[agg_index].agg_state = 4438 QLNX_AGG_STATE_ERROR; 4439 4440 ecore_chain_consume(&rxq->rx_bd_ring); 4441 rxq->sw_rx_cons = 4442 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4443 4444 continue; 4445 } 4446 4447 mpc->m_flags &= ~M_PKTHDR; 4448 mpc->m_next = NULL; 4449 mpc->m_len = cqe->len_list[i]; 4450 4451 if (mpf == NULL) { 4452 mpf = mpl = mpc; 4453 } else { 4454 mpl->m_len = ha->rx_buf_size; 4455 mpl->m_next = mpc; 4456 mpl = mpc; 4457 } 4458 4459 ecore_chain_consume(&rxq->rx_bd_ring); 4460 rxq->sw_rx_cons = 4461 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4462 } 4463 4464 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4465 4466 if (mpf != NULL) { 4467 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4468 4469 mp = rxq->tpa_info[agg_index].mpl; 4470 mp->m_len = ha->rx_buf_size; 4471 mp->m_next = mpf; 4472 } 4473 4474 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4475 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4476 4477 if (rxq->tpa_info[agg_index].mpf != NULL) 4478 m_freem(rxq->tpa_info[agg_index].mpf); 4479 rxq->tpa_info[agg_index].mpf = NULL; 4480 rxq->tpa_info[agg_index].mpl = NULL; 4481 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4482 return (0); 4483 } 4484 4485 mp = rxq->tpa_info[agg_index].mpf; 4486 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4487 mp->m_pkthdr.len = cqe->total_packet_len; 4488 4489 if (mp->m_next == NULL) 4490 mp->m_len = mp->m_pkthdr.len; 4491 else { 4492 /* compute the total packet length */ 4493 mpf = mp; 4494 while (mpf != NULL) { 4495 len += mpf->m_len; 4496 mpf = mpf->m_next; 4497 } 4498 4499 if (cqe->total_packet_len > len) { 4500 mpl = rxq->tpa_info[agg_index].mpl; 4501 mpl->m_len += (cqe->total_packet_len - len); 4502 } 4503 } 4504 4505 QLNX_INC_IPACKETS(ifp); 4506 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4507 4508 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4509 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4510 fp->rss_id, mp->m_pkthdr.csum_data, 4511 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4512 4513 if_input(ifp, mp); 4514 4515 rxq->tpa_info[agg_index].mpf = NULL; 4516 rxq->tpa_info[agg_index].mpl = NULL; 4517 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4518 4519 return (cqe->num_of_coalesced_segs); 4520 } 4521 4522 static int 4523 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4524 int lro_enable) 4525 { 4526 uint16_t hw_comp_cons, sw_comp_cons; 4527 int rx_pkt = 0; 4528 struct qlnx_rx_queue *rxq = fp->rxq; 4529 if_t ifp = ha->ifp; 4530 struct ecore_dev *cdev = &ha->cdev; 4531 struct ecore_hwfn *p_hwfn; 4532 4533 #ifdef QLNX_SOFT_LRO 4534 struct lro_ctrl *lro; 4535 4536 lro = &rxq->lro; 4537 #endif /* #ifdef QLNX_SOFT_LRO */ 4538 4539 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4540 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4541 4542 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4543 4544 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4545 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4546 * read before it is written by FW, then FW writes CQE and SB, and then 4547 * the CPU reads the hw_comp_cons, it will use an old CQE. 
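	 * (hw_comp_cons was latched above from *rxq->hw_cons_ptr and the loop
	 *  below only walks CQEs up to that snapshot; qlnx_fp_isr() refreshes
	 *  the status-block index again via ecore_sb_update_sb_idx()/rmb()
	 *  before re-enabling the IGU interrupt, so completions that arrive
	 *  after the snapshot are picked up on the next pass.)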
4548 */ 4549 4550 /* Loop to complete all indicated BDs */ 4551 while (sw_comp_cons != hw_comp_cons) { 4552 union eth_rx_cqe *cqe; 4553 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4554 struct sw_rx_data *sw_rx_data; 4555 register struct mbuf *mp; 4556 enum eth_rx_cqe_type cqe_type; 4557 uint16_t len, pad, len_on_first_bd; 4558 uint8_t *data; 4559 uint8_t hash_type; 4560 4561 /* Get the CQE from the completion ring */ 4562 cqe = (union eth_rx_cqe *) 4563 ecore_chain_consume(&rxq->rx_comp_ring); 4564 cqe_type = cqe->fast_path_regular.type; 4565 4566 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4567 QL_DPRINT3(ha, "Got a slowath CQE\n"); 4568 4569 ecore_eth_cqe_completion(p_hwfn, 4570 (struct eth_slow_path_rx_cqe *)cqe); 4571 goto next_cqe; 4572 } 4573 4574 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4575 switch (cqe_type) { 4576 case ETH_RX_CQE_TYPE_TPA_START: 4577 qlnx_tpa_start(ha, fp, rxq, 4578 &cqe->fast_path_tpa_start); 4579 fp->tpa_start++; 4580 break; 4581 4582 case ETH_RX_CQE_TYPE_TPA_CONT: 4583 qlnx_tpa_cont(ha, fp, rxq, 4584 &cqe->fast_path_tpa_cont); 4585 fp->tpa_cont++; 4586 break; 4587 4588 case ETH_RX_CQE_TYPE_TPA_END: 4589 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4590 &cqe->fast_path_tpa_end); 4591 fp->tpa_end++; 4592 break; 4593 4594 default: 4595 break; 4596 } 4597 4598 goto next_cqe; 4599 } 4600 4601 /* Get the data from the SW ring */ 4602 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4603 mp = sw_rx_data->data; 4604 4605 if (mp == NULL) { 4606 QL_DPRINT1(ha, "mp = NULL\n"); 4607 fp->err_rx_mp_null++; 4608 rxq->sw_rx_cons = 4609 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4610 goto next_cqe; 4611 } 4612 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4613 BUS_DMASYNC_POSTREAD); 4614 4615 /* non GRO */ 4616 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4617 len = le16toh(fp_cqe->pkt_len); 4618 pad = fp_cqe->placement_offset; 4619 #if 0 4620 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4621 " len %u, parsing flags = %d pad = %d\n", 4622 cqe_type, fp_cqe->bitfields, 4623 le16toh(fp_cqe->vlan_tag), 4624 len, le16toh(fp_cqe->pars_flags.flags), pad); 4625 #endif 4626 data = mtod(mp, uint8_t *); 4627 data = data + pad; 4628 4629 if (0) 4630 qlnx_dump_buf8(ha, __func__, data, len); 4631 4632 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4633 * is always with a fixed size. If allocation fails, we take the 4634 * consumed BD and return it to the ring in the PROD position. 4635 * The packet that was received on that BD will be dropped (and 4636 * not passed to the upper stack). 
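	 * (The reuse itself is done by qlnx_reuse_rx_data(), further down in
	 *  this file: it copies the consumed eth_rx_bd and its mbuf pointer
	 *  into the current producer slot and advances both sw_rx_cons and
	 *  sw_rx_prod, so the ring stays fully populated.)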
4637 */ 4638 /* If this is an error packet then drop it */ 4639 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4640 CQE_FLAGS_ERR) { 4641 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4642 " dropping incoming packet\n", sw_comp_cons, 4643 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4644 fp->err_rx_hw_errors++; 4645 4646 qlnx_reuse_rx_data(rxq); 4647 4648 QLNX_INC_IERRORS(ifp); 4649 4650 goto next_cqe; 4651 } 4652 4653 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4654 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4655 " incoming packet and reusing its buffer\n"); 4656 qlnx_reuse_rx_data(rxq); 4657 4658 fp->err_rx_alloc_errors++; 4659 4660 QLNX_INC_IQDROPS(ifp); 4661 4662 goto next_cqe; 4663 } 4664 4665 ecore_chain_consume(&rxq->rx_bd_ring); 4666 4667 len_on_first_bd = fp_cqe->len_on_first_bd; 4668 m_adj(mp, pad); 4669 mp->m_pkthdr.len = len; 4670 4671 if ((len > 60 ) && (len > len_on_first_bd)) { 4672 mp->m_len = len_on_first_bd; 4673 4674 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4675 (len - len_on_first_bd)) != 0) { 4676 m_freem(mp); 4677 4678 QLNX_INC_IQDROPS(ifp); 4679 4680 goto next_cqe; 4681 } 4682 4683 } else if (len_on_first_bd < len) { 4684 fp->err_rx_jumbo_chain_pkts++; 4685 } else { 4686 mp->m_len = len; 4687 } 4688 4689 mp->m_flags |= M_PKTHDR; 4690 4691 /* assign packet to this interface interface */ 4692 mp->m_pkthdr.rcvif = ifp; 4693 4694 /* assume no hardware checksum has complated */ 4695 mp->m_pkthdr.csum_flags = 0; 4696 4697 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4698 4699 hash_type = fp_cqe->bitfields & 4700 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4701 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4702 4703 switch (hash_type) { 4704 case RSS_HASH_TYPE_IPV4: 4705 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4706 break; 4707 4708 case RSS_HASH_TYPE_TCP_IPV4: 4709 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4710 break; 4711 4712 case RSS_HASH_TYPE_IPV6: 4713 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4714 break; 4715 4716 case RSS_HASH_TYPE_TCP_IPV6: 4717 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4718 break; 4719 4720 default: 4721 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4722 break; 4723 } 4724 4725 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4726 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4727 } 4728 4729 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4730 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4731 } 4732 4733 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4734 mp->m_pkthdr.csum_data = 0xFFFF; 4735 mp->m_pkthdr.csum_flags |= 4736 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4737 } 4738 4739 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4740 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4741 mp->m_flags |= M_VLANTAG; 4742 } 4743 4744 QLNX_INC_IPACKETS(ifp); 4745 QLNX_INC_IBYTES(ifp, len); 4746 4747 #ifdef QLNX_SOFT_LRO 4748 if (lro_enable) 4749 tcp_lro_queue_mbuf(lro, mp); 4750 else 4751 if_input(ifp, mp); 4752 #else 4753 4754 if_input(ifp, mp); 4755 4756 #endif /* #ifdef QLNX_SOFT_LRO */ 4757 4758 rx_pkt++; 4759 4760 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4761 4762 next_cqe: /* don't consume bd rx buffer */ 4763 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4764 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4765 4766 /* CR TPA - revisit how to handle budget in TPA perhaps 4767 increase on "end" */ 4768 if (rx_pkt == budget) 4769 break; 4770 } /* repeat while sw_comp_cons != hw_comp_cons... 
*/ 4771 4772 /* Update producers */ 4773 qlnx_update_rx_prod(p_hwfn, rxq); 4774 4775 return rx_pkt; 4776 } 4777 4778 /* 4779 * fast path interrupt 4780 */ 4781 4782 static void 4783 qlnx_fp_isr(void *arg) 4784 { 4785 qlnx_ivec_t *ivec = arg; 4786 qlnx_host_t *ha; 4787 struct qlnx_fastpath *fp = NULL; 4788 int idx; 4789 4790 ha = ivec->ha; 4791 4792 if (ha->state != QLNX_STATE_OPEN) { 4793 return; 4794 } 4795 4796 idx = ivec->rss_idx; 4797 4798 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4799 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4800 ha->err_illegal_intr++; 4801 return; 4802 } 4803 fp = &ha->fp_array[idx]; 4804 4805 if (fp == NULL) { 4806 ha->err_fp_null++; 4807 } else { 4808 int rx_int = 0; 4809 #ifdef QLNX_SOFT_LRO 4810 int total_rx_count = 0; 4811 #endif 4812 int lro_enable, tc; 4813 struct qlnx_tx_queue *txq; 4814 uint16_t elem_left; 4815 4816 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO; 4817 4818 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4819 4820 do { 4821 for (tc = 0; tc < ha->num_tc; tc++) { 4822 txq = fp->txq[tc]; 4823 4824 if((int)(elem_left = 4825 ecore_chain_get_elem_left(&txq->tx_pbl)) < 4826 QLNX_TX_ELEM_THRESH) { 4827 if (mtx_trylock(&fp->tx_mtx)) { 4828 #ifdef QLNX_TRACE_PERF_DATA 4829 tx_compl = fp->tx_pkts_completed; 4830 #endif 4831 4832 qlnx_tx_int(ha, fp, fp->txq[tc]); 4833 #ifdef QLNX_TRACE_PERF_DATA 4834 fp->tx_pkts_compl_intr += 4835 (fp->tx_pkts_completed - tx_compl); 4836 if ((fp->tx_pkts_completed - tx_compl) <= 32) 4837 fp->tx_comInt[0]++; 4838 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 4839 ((fp->tx_pkts_completed - tx_compl) <= 64)) 4840 fp->tx_comInt[1]++; 4841 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 4842 ((fp->tx_pkts_completed - tx_compl) <= 128)) 4843 fp->tx_comInt[2]++; 4844 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 4845 fp->tx_comInt[3]++; 4846 #endif 4847 mtx_unlock(&fp->tx_mtx); 4848 } 4849 } 4850 } 4851 4852 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4853 lro_enable); 4854 4855 if (rx_int) { 4856 fp->rx_pkts += rx_int; 4857 #ifdef QLNX_SOFT_LRO 4858 total_rx_count += rx_int; 4859 #endif 4860 } 4861 4862 } while (rx_int); 4863 4864 #ifdef QLNX_SOFT_LRO 4865 { 4866 struct lro_ctrl *lro; 4867 4868 lro = &fp->rxq->lro; 4869 4870 if (lro_enable && total_rx_count) { 4871 4872 #ifdef QLNX_TRACE_LRO_CNT 4873 if (lro->lro_mbuf_count & ~1023) 4874 fp->lro_cnt_1024++; 4875 else if (lro->lro_mbuf_count & ~511) 4876 fp->lro_cnt_512++; 4877 else if (lro->lro_mbuf_count & ~255) 4878 fp->lro_cnt_256++; 4879 else if (lro->lro_mbuf_count & ~127) 4880 fp->lro_cnt_128++; 4881 else if (lro->lro_mbuf_count & ~63) 4882 fp->lro_cnt_64++; 4883 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4884 4885 tcp_lro_flush_all(lro); 4886 } 4887 } 4888 #endif /* #ifdef QLNX_SOFT_LRO */ 4889 4890 ecore_sb_update_sb_idx(fp->sb_info); 4891 rmb(); 4892 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4893 } 4894 4895 return; 4896 } 4897 4898 /* 4899 * slow path interrupt processing function 4900 * can be invoked in polled mode or in interrupt mode via taskqueue. 
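 * Either way it simply bumps ha->sp_interrupts and runs the ecore
 * slow-path DPC (ecore_int_sp_dpc()) for the hwfn passed as the argument.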
4901 */ 4902 void 4903 qlnx_sp_isr(void *arg) 4904 { 4905 struct ecore_hwfn *p_hwfn; 4906 qlnx_host_t *ha; 4907 4908 p_hwfn = arg; 4909 4910 ha = (qlnx_host_t *)p_hwfn->p_dev; 4911 4912 ha->sp_interrupts++; 4913 4914 QL_DPRINT2(ha, "enter\n"); 4915 4916 ecore_int_sp_dpc(p_hwfn); 4917 4918 QL_DPRINT2(ha, "exit\n"); 4919 4920 return; 4921 } 4922 4923 /***************************************************************************** 4924 * Support Functions for DMA'able Memory 4925 *****************************************************************************/ 4926 4927 static void 4928 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4929 { 4930 *((bus_addr_t *)arg) = 0; 4931 4932 if (error) { 4933 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4934 return; 4935 } 4936 4937 *((bus_addr_t *)arg) = segs[0].ds_addr; 4938 4939 return; 4940 } 4941 4942 static int 4943 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4944 { 4945 int ret = 0; 4946 bus_addr_t b_addr; 4947 4948 ret = bus_dma_tag_create( 4949 ha->parent_tag,/* parent */ 4950 dma_buf->alignment, 4951 ((bus_size_t)(1ULL << 32)),/* boundary */ 4952 BUS_SPACE_MAXADDR, /* lowaddr */ 4953 BUS_SPACE_MAXADDR, /* highaddr */ 4954 NULL, NULL, /* filter, filterarg */ 4955 dma_buf->size, /* maxsize */ 4956 1, /* nsegments */ 4957 dma_buf->size, /* maxsegsize */ 4958 0, /* flags */ 4959 NULL, NULL, /* lockfunc, lockarg */ 4960 &dma_buf->dma_tag); 4961 4962 if (ret) { 4963 QL_DPRINT1(ha, "could not create dma tag\n"); 4964 goto qlnx_alloc_dmabuf_exit; 4965 } 4966 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4967 (void **)&dma_buf->dma_b, 4968 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4969 &dma_buf->dma_map); 4970 if (ret) { 4971 bus_dma_tag_destroy(dma_buf->dma_tag); 4972 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 4973 goto qlnx_alloc_dmabuf_exit; 4974 } 4975 4976 ret = bus_dmamap_load(dma_buf->dma_tag, 4977 dma_buf->dma_map, 4978 dma_buf->dma_b, 4979 dma_buf->size, 4980 qlnx_dmamap_callback, 4981 &b_addr, BUS_DMA_NOWAIT); 4982 4983 if (ret || !b_addr) { 4984 bus_dma_tag_destroy(dma_buf->dma_tag); 4985 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4986 dma_buf->dma_map); 4987 ret = -1; 4988 goto qlnx_alloc_dmabuf_exit; 4989 } 4990 4991 dma_buf->dma_addr = b_addr; 4992 4993 qlnx_alloc_dmabuf_exit: 4994 4995 return ret; 4996 } 4997 4998 static void 4999 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5000 { 5001 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5002 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5003 bus_dma_tag_destroy(dma_buf->dma_tag); 5004 return; 5005 } 5006 5007 void * 5008 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5009 { 5010 qlnx_dma_t dma_buf; 5011 qlnx_dma_t *dma_p; 5012 qlnx_host_t *ha __unused; 5013 5014 ha = (qlnx_host_t *)ecore_dev; 5015 5016 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5017 5018 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5019 5020 dma_buf.size = size + PAGE_SIZE; 5021 dma_buf.alignment = 8; 5022 5023 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5024 return (NULL); 5025 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5026 5027 *phys = dma_buf.dma_addr; 5028 5029 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 5030 5031 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5032 5033 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5034 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5035 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5036 5037 return 
(dma_buf.dma_b); 5038 } 5039 5040 void 5041 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5042 uint32_t size) 5043 { 5044 qlnx_dma_t dma_buf, *dma_p; 5045 qlnx_host_t *ha; 5046 5047 ha = (qlnx_host_t *)ecore_dev; 5048 5049 if (v_addr == NULL) 5050 return; 5051 5052 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5053 5054 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5055 5056 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5057 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5058 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5059 5060 dma_buf = *dma_p; 5061 5062 if (!ha->qlnxr_debug) 5063 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5064 return; 5065 } 5066 5067 static int 5068 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5069 { 5070 int ret; 5071 device_t dev; 5072 5073 dev = ha->pci_dev; 5074 5075 /* 5076 * Allocate parent DMA Tag 5077 */ 5078 ret = bus_dma_tag_create( 5079 bus_get_dma_tag(dev), /* parent */ 5080 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5081 BUS_SPACE_MAXADDR, /* lowaddr */ 5082 BUS_SPACE_MAXADDR, /* highaddr */ 5083 NULL, NULL, /* filter, filterarg */ 5084 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5085 0, /* nsegments */ 5086 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5087 0, /* flags */ 5088 NULL, NULL, /* lockfunc, lockarg */ 5089 &ha->parent_tag); 5090 5091 if (ret) { 5092 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5093 return (-1); 5094 } 5095 5096 ha->flags.parent_tag = 1; 5097 5098 return (0); 5099 } 5100 5101 static void 5102 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5103 { 5104 if (ha->parent_tag != NULL) { 5105 bus_dma_tag_destroy(ha->parent_tag); 5106 ha->parent_tag = NULL; 5107 } 5108 return; 5109 } 5110 5111 static int 5112 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5113 { 5114 if (bus_dma_tag_create(NULL, /* parent */ 5115 1, 0, /* alignment, bounds */ 5116 BUS_SPACE_MAXADDR, /* lowaddr */ 5117 BUS_SPACE_MAXADDR, /* highaddr */ 5118 NULL, NULL, /* filter, filterarg */ 5119 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5120 QLNX_MAX_SEGMENTS, /* nsegments */ 5121 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5122 0, /* flags */ 5123 NULL, /* lockfunc */ 5124 NULL, /* lockfuncarg */ 5125 &ha->tx_tag)) { 5126 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5127 return (-1); 5128 } 5129 5130 return (0); 5131 } 5132 5133 static void 5134 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5135 { 5136 if (ha->tx_tag != NULL) { 5137 bus_dma_tag_destroy(ha->tx_tag); 5138 ha->tx_tag = NULL; 5139 } 5140 return; 5141 } 5142 5143 static int 5144 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5145 { 5146 if (bus_dma_tag_create(NULL, /* parent */ 5147 1, 0, /* alignment, bounds */ 5148 BUS_SPACE_MAXADDR, /* lowaddr */ 5149 BUS_SPACE_MAXADDR, /* highaddr */ 5150 NULL, NULL, /* filter, filterarg */ 5151 MJUM9BYTES, /* maxsize */ 5152 1, /* nsegments */ 5153 MJUM9BYTES, /* maxsegsize */ 5154 0, /* flags */ 5155 NULL, /* lockfunc */ 5156 NULL, /* lockfuncarg */ 5157 &ha->rx_tag)) { 5158 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5159 5160 return (-1); 5161 } 5162 return (0); 5163 } 5164 5165 static void 5166 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5167 { 5168 if (ha->rx_tag != NULL) { 5169 bus_dma_tag_destroy(ha->rx_tag); 5170 ha->rx_tag = NULL; 5171 } 5172 return; 5173 } 5174 5175 /********************************* 5176 * Exported functions 5177 *********************************/ 5178 uint32_t 5179 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5180 { 5181 uint32_t bar_size; 5182 5183 bar_id = bar_id * 2; 5184 5185 bar_size = 
bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5186 SYS_RES_MEMORY, 5187 PCIR_BAR(bar_id)); 5188 5189 return (bar_size); 5190 } 5191 5192 uint32_t 5193 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5194 { 5195 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5196 pci_reg, 1); 5197 return 0; 5198 } 5199 5200 uint32_t 5201 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5202 uint16_t *reg_value) 5203 { 5204 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5205 pci_reg, 2); 5206 return 0; 5207 } 5208 5209 uint32_t 5210 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5211 uint32_t *reg_value) 5212 { 5213 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5214 pci_reg, 4); 5215 return 0; 5216 } 5217 5218 void 5219 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5220 { 5221 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5222 pci_reg, reg_value, 1); 5223 return; 5224 } 5225 5226 void 5227 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5228 uint16_t reg_value) 5229 { 5230 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5231 pci_reg, reg_value, 2); 5232 return; 5233 } 5234 5235 void 5236 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5237 uint32_t reg_value) 5238 { 5239 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5240 pci_reg, reg_value, 4); 5241 return; 5242 } 5243 5244 int 5245 qlnx_pci_find_capability(void *ecore_dev, int cap) 5246 { 5247 int reg; 5248 qlnx_host_t *ha; 5249 5250 ha = ecore_dev; 5251 5252 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0) 5253 return reg; 5254 else { 5255 QL_DPRINT1(ha, "failed\n"); 5256 return 0; 5257 } 5258 } 5259 5260 int 5261 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5262 { 5263 int reg; 5264 qlnx_host_t *ha; 5265 5266 ha = ecore_dev; 5267 5268 if (pci_find_extcap(ha->pci_dev, ext_cap, ®) == 0) 5269 return reg; 5270 else { 5271 QL_DPRINT1(ha, "failed\n"); 5272 return 0; 5273 } 5274 } 5275 5276 uint32_t 5277 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5278 { 5279 uint32_t data32; 5280 struct ecore_hwfn *p_hwfn; 5281 5282 p_hwfn = hwfn; 5283 5284 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5285 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5286 5287 return (data32); 5288 } 5289 5290 void 5291 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5292 { 5293 struct ecore_hwfn *p_hwfn = hwfn; 5294 5295 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5296 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5297 5298 return; 5299 } 5300 5301 void 5302 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5303 { 5304 struct ecore_hwfn *p_hwfn = hwfn; 5305 5306 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5307 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5308 return; 5309 } 5310 5311 void 5312 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5313 { 5314 struct ecore_dev *cdev; 5315 struct ecore_hwfn *p_hwfn; 5316 uint32_t offset; 5317 5318 p_hwfn = hwfn; 5319 5320 cdev = p_hwfn->p_dev; 5321 5322 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5323 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value); 5324 5325 return; 5326 } 5327 5328 void 5329 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5330 { 5331 struct ecore_hwfn *p_hwfn = hwfn; 5332 5333 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5334 
(bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5335 5336 return; 5337 } 5338 5339 uint32_t 5340 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5341 { 5342 uint32_t data32; 5343 bus_size_t offset; 5344 struct ecore_dev *cdev; 5345 5346 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5347 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5348 5349 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5350 5351 return (data32); 5352 } 5353 5354 void 5355 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5356 { 5357 bus_size_t offset; 5358 struct ecore_dev *cdev; 5359 5360 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5361 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5362 5363 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5364 5365 return; 5366 } 5367 5368 void 5369 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5370 { 5371 bus_size_t offset; 5372 struct ecore_dev *cdev; 5373 5374 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5375 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5376 5377 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5378 return; 5379 } 5380 5381 void * 5382 qlnx_zalloc(uint32_t size) 5383 { 5384 caddr_t va; 5385 5386 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5387 bzero(va, size); 5388 return ((void *)va); 5389 } 5390 5391 void 5392 qlnx_barrier(void *p_hwfn) 5393 { 5394 qlnx_host_t *ha; 5395 5396 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5397 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5398 } 5399 5400 void 5401 qlnx_link_update(void *p_hwfn) 5402 { 5403 qlnx_host_t *ha; 5404 int prev_link_state; 5405 5406 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5407 5408 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5409 5410 prev_link_state = ha->link_up; 5411 ha->link_up = ha->if_link.link_up; 5412 5413 if (prev_link_state != ha->link_up) { 5414 if (ha->link_up) { 5415 if_link_state_change(ha->ifp, LINK_STATE_UP); 5416 } else { 5417 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5418 } 5419 } 5420 #ifndef QLNX_VF 5421 #ifdef CONFIG_ECORE_SRIOV 5422 5423 if (qlnx_vf_device(ha) != 0) { 5424 if (ha->sriov_initialized) 5425 qlnx_inform_vf_link_state(p_hwfn, ha); 5426 } 5427 5428 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5429 #endif /* #ifdef QLNX_VF */ 5430 5431 return; 5432 } 5433 5434 static void 5435 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5436 struct ecore_vf_acquire_sw_info *p_sw_info) 5437 { 5438 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5439 (QLNX_VERSION_MINOR << 16) | 5440 QLNX_VERSION_BUILD; 5441 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5442 5443 return; 5444 } 5445 5446 void 5447 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5448 void *p_sw_info) 5449 { 5450 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5451 5452 return; 5453 } 5454 5455 void 5456 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5457 struct qlnx_link_output *if_link) 5458 { 5459 struct ecore_mcp_link_params link_params; 5460 struct ecore_mcp_link_state link_state; 5461 uint8_t p_change; 5462 struct ecore_ptt *p_ptt = NULL; 5463 5464 memset(if_link, 0, sizeof(*if_link)); 5465 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5466 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5467 5468 ha = (qlnx_host_t *)hwfn->p_dev; 5469 5470 /* Prepare source inputs */ 5471 /* we only deal with physical functions */ 
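	/*
	 * PF path (qlnx_vf_device() != 0): media type and link
	 * params/state are queried from the management FW through a PTT
	 * window.  VF path: the same information is taken from the VF
	 * bulletin the PF keeps updated (ecore_vf_read_bulletin() and
	 * the ecore_vf_get_link_*() calls below).
	 */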
5472 if (qlnx_vf_device(ha) != 0) { 5473 p_ptt = ecore_ptt_acquire(hwfn); 5474 5475 if (p_ptt == NULL) { 5476 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5477 return; 5478 } 5479 5480 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5481 ecore_ptt_release(hwfn, p_ptt); 5482 5483 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5484 sizeof(link_params)); 5485 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5486 sizeof(link_state)); 5487 } else { 5488 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5489 ecore_vf_read_bulletin(hwfn, &p_change); 5490 ecore_vf_get_link_params(hwfn, &link_params); 5491 ecore_vf_get_link_state(hwfn, &link_state); 5492 } 5493 5494 /* Set the link parameters to pass to protocol driver */ 5495 if (link_state.link_up) { 5496 if_link->link_up = true; 5497 if_link->speed = link_state.speed; 5498 } 5499 5500 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5501 5502 if (link_params.speed.autoneg) 5503 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5504 5505 if (link_params.pause.autoneg || 5506 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5507 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5508 5509 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5510 link_params.pause.forced_tx) 5511 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5512 5513 if (link_params.speed.advertised_speeds & 5514 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5515 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5516 QLNX_LINK_CAP_1000baseT_Full; 5517 5518 if (link_params.speed.advertised_speeds & 5519 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5520 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5521 5522 if (link_params.speed.advertised_speeds & 5523 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5524 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5525 5526 if (link_params.speed.advertised_speeds & 5527 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5528 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5529 5530 if (link_params.speed.advertised_speeds & 5531 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5532 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5533 5534 if (link_params.speed.advertised_speeds & 5535 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5536 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5537 5538 if_link->advertised_caps = if_link->supported_caps; 5539 5540 if_link->autoneg = link_params.speed.autoneg; 5541 if_link->duplex = QLNX_LINK_DUPLEX; 5542 5543 /* Link partner capabilities */ 5544 5545 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5546 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5547 5548 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5549 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5550 5551 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5552 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5553 5554 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5555 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5556 5557 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5558 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5559 5560 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5561 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5562 5563 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5564 if_link->link_partner_caps |= 
QLNX_LINK_CAP_100000baseKR4_Full; 5565 5566 if (link_state.an_complete) 5567 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5568 5569 if (link_state.partner_adv_pause) 5570 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5571 5572 if ((link_state.partner_adv_pause == 5573 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5574 (link_state.partner_adv_pause == 5575 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5576 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5577 5578 return; 5579 } 5580 5581 void 5582 qlnx_schedule_recovery(void *p_hwfn) 5583 { 5584 qlnx_host_t *ha; 5585 5586 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5587 5588 if (qlnx_vf_device(ha) != 0) { 5589 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5590 } 5591 5592 return; 5593 } 5594 5595 static int 5596 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5597 { 5598 int rc, i; 5599 5600 for (i = 0; i < cdev->num_hwfns; i++) { 5601 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5602 p_hwfn->pf_params = *func_params; 5603 5604 #ifdef QLNX_ENABLE_IWARP 5605 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5606 p_hwfn->using_ll2 = true; 5607 } 5608 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5609 } 5610 5611 rc = ecore_resc_alloc(cdev); 5612 if (rc) 5613 goto qlnx_nic_setup_exit; 5614 5615 ecore_resc_setup(cdev); 5616 5617 qlnx_nic_setup_exit: 5618 5619 return rc; 5620 } 5621 5622 static int 5623 qlnx_nic_start(struct ecore_dev *cdev) 5624 { 5625 int rc; 5626 struct ecore_hw_init_params params; 5627 5628 bzero(¶ms, sizeof (struct ecore_hw_init_params)); 5629 5630 params.p_tunn = NULL; 5631 params.b_hw_start = true; 5632 params.int_mode = cdev->int_mode; 5633 params.allow_npar_tx_switch = true; 5634 params.bin_fw_data = NULL; 5635 5636 rc = ecore_hw_init(cdev, ¶ms); 5637 if (rc) { 5638 ecore_resc_free(cdev); 5639 return rc; 5640 } 5641 5642 return 0; 5643 } 5644 5645 static int 5646 qlnx_slowpath_start(qlnx_host_t *ha) 5647 { 5648 struct ecore_dev *cdev; 5649 struct ecore_pf_params pf_params; 5650 int rc; 5651 5652 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5653 pf_params.eth_pf_params.num_cons = 5654 (ha->num_rss) * (ha->num_tc + 1); 5655 5656 #ifdef QLNX_ENABLE_IWARP 5657 if (qlnx_vf_device(ha) != 0) { 5658 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5659 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5660 pf_params.rdma_pf_params.num_qps = 1024; 5661 pf_params.rdma_pf_params.num_srqs = 1024; 5662 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5663 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5664 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5665 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5666 pf_params.rdma_pf_params.num_qps = 8192; 5667 pf_params.rdma_pf_params.num_srqs = 8192; 5668 //pf_params.rdma_pf_params.min_dpis = 0; 5669 pf_params.rdma_pf_params.min_dpis = 8; 5670 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5671 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5672 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5673 } 5674 } 5675 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5676 5677 cdev = &ha->cdev; 5678 5679 rc = qlnx_nic_setup(cdev, &pf_params); 5680 if (rc) 5681 goto qlnx_slowpath_start_exit; 5682 5683 cdev->int_mode = ECORE_INT_MODE_MSIX; 5684 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5685 5686 #ifdef QLNX_MAX_COALESCE 5687 cdev->rx_coalesce_usecs = 255; 5688 cdev->tx_coalesce_usecs = 255; 5689 #endif 5690 5691 rc = qlnx_nic_start(cdev); 
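	/*
	 * By now qlnx_nic_setup() has copied the PF params into every
	 * hwfn and allocated the ecore resources, and qlnx_nic_start()
	 * has run ecore_hw_init() with MSI-X and interrupt coalescing
	 * enabled; the resulting coalescing values are copied back into
	 * the softc below.
	 */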
5692 5693 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5694 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5695 5696 #ifdef QLNX_USER_LLDP 5697 (void)qlnx_set_lldp_tlvx(ha, NULL); 5698 #endif /* #ifdef QLNX_USER_LLDP */ 5699 5700 qlnx_slowpath_start_exit: 5701 5702 return (rc); 5703 } 5704 5705 static int 5706 qlnx_slowpath_stop(qlnx_host_t *ha) 5707 { 5708 struct ecore_dev *cdev; 5709 device_t dev = ha->pci_dev; 5710 int i; 5711 5712 cdev = &ha->cdev; 5713 5714 ecore_hw_stop(cdev); 5715 5716 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5717 if (ha->sp_handle[i]) 5718 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5719 ha->sp_handle[i]); 5720 5721 ha->sp_handle[i] = NULL; 5722 5723 if (ha->sp_irq[i]) 5724 (void) bus_release_resource(dev, SYS_RES_IRQ, 5725 ha->sp_irq_rid[i], ha->sp_irq[i]); 5726 ha->sp_irq[i] = NULL; 5727 } 5728 5729 ecore_resc_free(cdev); 5730 5731 return 0; 5732 } 5733 5734 static void 5735 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5736 char ver_str[VER_SIZE]) 5737 { 5738 int i; 5739 5740 memcpy(cdev->name, name, NAME_SIZE); 5741 5742 for_each_hwfn(cdev, i) { 5743 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5744 } 5745 5746 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5747 5748 return ; 5749 } 5750 5751 void 5752 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5753 { 5754 enum ecore_mcp_protocol_type type; 5755 union ecore_mcp_protocol_stats *stats; 5756 struct ecore_eth_stats eth_stats; 5757 qlnx_host_t *ha; 5758 5759 ha = cdev; 5760 stats = proto_stats; 5761 type = proto_type; 5762 5763 switch (type) { 5764 case ECORE_MCP_LAN_STATS: 5765 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats); 5766 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5767 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5768 stats->lan_stats.fcs_err = -1; 5769 break; 5770 5771 default: 5772 ha->err_get_proto_invalid_type++; 5773 5774 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5775 break; 5776 } 5777 return; 5778 } 5779 5780 static int 5781 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5782 { 5783 struct ecore_hwfn *p_hwfn; 5784 struct ecore_ptt *p_ptt; 5785 5786 p_hwfn = &ha->cdev.hwfns[0]; 5787 p_ptt = ecore_ptt_acquire(p_hwfn); 5788 5789 if (p_ptt == NULL) { 5790 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5791 return (-1); 5792 } 5793 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5794 5795 ecore_ptt_release(p_hwfn, p_ptt); 5796 5797 return (0); 5798 } 5799 5800 static int 5801 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5802 { 5803 struct ecore_hwfn *p_hwfn; 5804 struct ecore_ptt *p_ptt; 5805 5806 p_hwfn = &ha->cdev.hwfns[0]; 5807 p_ptt = ecore_ptt_acquire(p_hwfn); 5808 5809 if (p_ptt == NULL) { 5810 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5811 return (-1); 5812 } 5813 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5814 5815 ecore_ptt_release(p_hwfn, p_ptt); 5816 5817 return (0); 5818 } 5819 5820 static int 5821 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5822 { 5823 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5824 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5825 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5826 5827 return 0; 5828 } 5829 5830 static void 5831 qlnx_init_fp(qlnx_host_t *ha) 5832 { 5833 int rss_id, txq_array_index, tc; 5834 5835 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5836 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5837 5838 fp->rss_id 
= rss_id; 5839 fp->edev = ha; 5840 fp->sb_info = &ha->sb_array[rss_id]; 5841 fp->rxq = &ha->rxq_array[rss_id]; 5842 fp->rxq->rxq_id = rss_id; 5843 5844 for (tc = 0; tc < ha->num_tc; tc++) { 5845 txq_array_index = tc * ha->num_rss + rss_id; 5846 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5847 fp->txq[tc]->index = txq_array_index; 5848 } 5849 5850 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5851 rss_id); 5852 5853 fp->tx_ring_full = 0; 5854 5855 /* reset all the statistics counters */ 5856 5857 fp->tx_pkts_processed = 0; 5858 fp->tx_pkts_freed = 0; 5859 fp->tx_pkts_transmitted = 0; 5860 fp->tx_pkts_completed = 0; 5861 5862 #ifdef QLNX_TRACE_PERF_DATA 5863 fp->tx_pkts_trans_ctx = 0; 5864 fp->tx_pkts_compl_ctx = 0; 5865 fp->tx_pkts_trans_fp = 0; 5866 fp->tx_pkts_compl_fp = 0; 5867 fp->tx_pkts_compl_intr = 0; 5868 #endif 5869 fp->tx_lso_wnd_min_len = 0; 5870 fp->tx_defrag = 0; 5871 fp->tx_nsegs_gt_elem_left = 0; 5872 fp->tx_tso_max_nsegs = 0; 5873 fp->tx_tso_min_nsegs = 0; 5874 fp->err_tx_nsegs_gt_elem_left = 0; 5875 fp->err_tx_dmamap_create = 0; 5876 fp->err_tx_defrag_dmamap_load = 0; 5877 fp->err_tx_non_tso_max_seg = 0; 5878 fp->err_tx_dmamap_load = 0; 5879 fp->err_tx_defrag = 0; 5880 fp->err_tx_free_pkt_null = 0; 5881 fp->err_tx_cons_idx_conflict = 0; 5882 5883 fp->rx_pkts = 0; 5884 fp->err_m_getcl = 0; 5885 fp->err_m_getjcl = 0; 5886 } 5887 return; 5888 } 5889 5890 void 5891 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5892 { 5893 struct ecore_dev *cdev; 5894 5895 cdev = &ha->cdev; 5896 5897 if (sb_info->sb_virt) { 5898 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5899 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5900 sb_info->sb_virt = NULL; 5901 } 5902 } 5903 5904 static int 5905 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5906 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5907 { 5908 struct ecore_hwfn *p_hwfn; 5909 int hwfn_index, rc; 5910 u16 rel_sb_id; 5911 5912 hwfn_index = sb_id % cdev->num_hwfns; 5913 p_hwfn = &cdev->hwfns[hwfn_index]; 5914 rel_sb_id = sb_id / cdev->num_hwfns; 5915 5916 QL_DPRINT2(((qlnx_host_t *)cdev), 5917 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 5918 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5919 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5920 sb_virt_addr, (void *)sb_phy_addr); 5921 5922 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5923 sb_virt_addr, sb_phy_addr, rel_sb_id); 5924 5925 return rc; 5926 } 5927 5928 /* This function allocates fast-path status block memory */ 5929 int 5930 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5931 { 5932 struct status_block_e4 *sb_virt; 5933 bus_addr_t sb_phys; 5934 int rc; 5935 uint32_t size; 5936 struct ecore_dev *cdev; 5937 5938 cdev = &ha->cdev; 5939 5940 size = sizeof(*sb_virt); 5941 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5942 5943 if (!sb_virt) { 5944 QL_DPRINT1(ha, "Status block allocation failed\n"); 5945 return -ENOMEM; 5946 } 5947 5948 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5949 if (rc) { 5950 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5951 } 5952 5953 return rc; 5954 } 5955 5956 static void 5957 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5958 { 5959 int i; 5960 struct sw_rx_data *rx_buf; 5961 5962 for (i = 0; i < rxq->num_rx_buffers; i++) { 5963 rx_buf = &rxq->sw_rx_ring[i]; 5964 5965 if (rx_buf->data != NULL) { 5966 if (rx_buf->map != NULL) { 5967 
bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5968 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5969 rx_buf->map = NULL; 5970 } 5971 m_freem(rx_buf->data); 5972 rx_buf->data = NULL; 5973 } 5974 } 5975 return; 5976 } 5977 5978 static void 5979 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5980 { 5981 struct ecore_dev *cdev; 5982 int i; 5983 5984 cdev = &ha->cdev; 5985 5986 qlnx_free_rx_buffers(ha, rxq); 5987 5988 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5989 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5990 if (rxq->tpa_info[i].mpf != NULL) 5991 m_freem(rxq->tpa_info[i].mpf); 5992 } 5993 5994 bzero((void *)&rxq->sw_rx_ring[0], 5995 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5996 5997 /* Free the real RQ ring used by FW */ 5998 if (rxq->rx_bd_ring.p_virt_addr) { 5999 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6000 rxq->rx_bd_ring.p_virt_addr = NULL; 6001 } 6002 6003 /* Free the real completion ring used by FW */ 6004 if (rxq->rx_comp_ring.p_virt_addr && 6005 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6006 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6007 rxq->rx_comp_ring.p_virt_addr = NULL; 6008 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6009 } 6010 6011 #ifdef QLNX_SOFT_LRO 6012 { 6013 struct lro_ctrl *lro; 6014 6015 lro = &rxq->lro; 6016 tcp_lro_free(lro); 6017 } 6018 #endif /* #ifdef QLNX_SOFT_LRO */ 6019 6020 return; 6021 } 6022 6023 static int 6024 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6025 { 6026 register struct mbuf *mp; 6027 uint16_t rx_buf_size; 6028 struct sw_rx_data *sw_rx_data; 6029 struct eth_rx_bd *rx_bd; 6030 dma_addr_t dma_addr; 6031 bus_dmamap_t map; 6032 bus_dma_segment_t segs[1]; 6033 int nsegs; 6034 int ret; 6035 6036 rx_buf_size = rxq->rx_buf_size; 6037 6038 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6039 6040 if (mp == NULL) { 6041 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6042 return -ENOMEM; 6043 } 6044 6045 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6046 6047 map = (bus_dmamap_t)0; 6048 6049 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6050 BUS_DMA_NOWAIT); 6051 dma_addr = segs[0].ds_addr; 6052 6053 if (ret || !dma_addr || (nsegs != 1)) { 6054 m_freem(mp); 6055 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6056 ret, (long long unsigned int)dma_addr, nsegs); 6057 return -ENOMEM; 6058 } 6059 6060 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6061 sw_rx_data->data = mp; 6062 sw_rx_data->dma_addr = dma_addr; 6063 sw_rx_data->map = map; 6064 6065 /* Advance PROD and get BD pointer */ 6066 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6067 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6068 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6069 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6070 6071 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6072 6073 return 0; 6074 } 6075 6076 static int 6077 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6078 struct qlnx_agg_info *tpa) 6079 { 6080 struct mbuf *mp; 6081 dma_addr_t dma_addr; 6082 bus_dmamap_t map; 6083 bus_dma_segment_t segs[1]; 6084 int nsegs; 6085 int ret; 6086 struct sw_rx_data *rx_buf; 6087 6088 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6089 6090 if (mp == NULL) { 6091 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6092 return -ENOMEM; 6093 } 6094 6095 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6096 6097 map = (bus_dmamap_t)0; 6098 6099 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6100 BUS_DMA_NOWAIT); 6101 dma_addr = 
segs[0].ds_addr; 6102 6103 if (ret || !dma_addr || (nsegs != 1)) { 6104 m_freem(mp); 6105 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6106 ret, (long long unsigned int)dma_addr, nsegs); 6107 return -ENOMEM; 6108 } 6109 6110 rx_buf = &tpa->rx_buf; 6111 6112 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6113 6114 rx_buf->data = mp; 6115 rx_buf->dma_addr = dma_addr; 6116 rx_buf->map = map; 6117 6118 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6119 6120 return (0); 6121 } 6122 6123 static void 6124 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6125 { 6126 struct sw_rx_data *rx_buf; 6127 6128 rx_buf = &tpa->rx_buf; 6129 6130 if (rx_buf->data != NULL) { 6131 if (rx_buf->map != NULL) { 6132 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6133 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6134 rx_buf->map = NULL; 6135 } 6136 m_freem(rx_buf->data); 6137 rx_buf->data = NULL; 6138 } 6139 return; 6140 } 6141 6142 /* This function allocates all memory needed per Rx queue */ 6143 static int 6144 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6145 { 6146 int i, rc, num_allocated; 6147 struct ecore_dev *cdev; 6148 6149 cdev = &ha->cdev; 6150 6151 rxq->num_rx_buffers = RX_RING_SIZE; 6152 6153 rxq->rx_buf_size = ha->rx_buf_size; 6154 6155 /* Allocate the parallel driver ring for Rx buffers */ 6156 bzero((void *)&rxq->sw_rx_ring[0], 6157 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6158 6159 /* Allocate FW Rx ring */ 6160 6161 rc = ecore_chain_alloc(cdev, 6162 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6163 ECORE_CHAIN_MODE_NEXT_PTR, 6164 ECORE_CHAIN_CNT_TYPE_U16, 6165 RX_RING_SIZE, 6166 sizeof(struct eth_rx_bd), 6167 &rxq->rx_bd_ring, NULL); 6168 6169 if (rc) 6170 goto err; 6171 6172 /* Allocate FW completion ring */ 6173 rc = ecore_chain_alloc(cdev, 6174 ECORE_CHAIN_USE_TO_CONSUME, 6175 ECORE_CHAIN_MODE_PBL, 6176 ECORE_CHAIN_CNT_TYPE_U16, 6177 RX_RING_SIZE, 6178 sizeof(union eth_rx_cqe), 6179 &rxq->rx_comp_ring, NULL); 6180 6181 if (rc) 6182 goto err; 6183 6184 /* Allocate buffers for the Rx ring */ 6185 6186 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6187 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6188 &rxq->tpa_info[i]); 6189 if (rc) 6190 break; 6191 } 6192 6193 for (i = 0; i < rxq->num_rx_buffers; i++) { 6194 rc = qlnx_alloc_rx_buffer(ha, rxq); 6195 if (rc) 6196 break; 6197 } 6198 num_allocated = i; 6199 if (!num_allocated) { 6200 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6201 goto err; 6202 } else if (num_allocated < rxq->num_rx_buffers) { 6203 QL_DPRINT1(ha, "Allocated less buffers than" 6204 " desired (%d allocated)\n", num_allocated); 6205 } 6206 6207 #ifdef QLNX_SOFT_LRO 6208 6209 { 6210 struct lro_ctrl *lro; 6211 6212 lro = &rxq->lro; 6213 6214 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6215 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6216 rxq->rxq_id); 6217 goto err; 6218 } 6219 6220 lro->ifp = ha->ifp; 6221 } 6222 #endif /* #ifdef QLNX_SOFT_LRO */ 6223 return 0; 6224 6225 err: 6226 qlnx_free_mem_rxq(ha, rxq); 6227 return -ENOMEM; 6228 } 6229 6230 static void 6231 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6232 struct qlnx_tx_queue *txq) 6233 { 6234 struct ecore_dev *cdev; 6235 6236 cdev = &ha->cdev; 6237 6238 bzero((void *)&txq->sw_tx_ring[0], 6239 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6240 6241 /* Free the real RQ ring used by FW */ 6242 if (txq->tx_pbl.p_virt_addr) { 6243 ecore_chain_free(cdev, &txq->tx_pbl); 6244 txq->tx_pbl.p_virt_addr = NULL; 6245 } 6246 return; 6247 } 6248 6249 /* This 
function allocates all memory needed per Tx queue */ 6250 static int 6251 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6252 struct qlnx_tx_queue *txq) 6253 { 6254 int ret = ECORE_SUCCESS; 6255 union eth_tx_bd_types *p_virt; 6256 struct ecore_dev *cdev; 6257 6258 cdev = &ha->cdev; 6259 6260 bzero((void *)&txq->sw_tx_ring[0], 6261 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6262 6263 /* Allocate the real Tx ring to be used by FW */ 6264 ret = ecore_chain_alloc(cdev, 6265 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6266 ECORE_CHAIN_MODE_PBL, 6267 ECORE_CHAIN_CNT_TYPE_U16, 6268 TX_RING_SIZE, 6269 sizeof(*p_virt), 6270 &txq->tx_pbl, NULL); 6271 6272 if (ret != ECORE_SUCCESS) { 6273 goto err; 6274 } 6275 6276 txq->num_tx_buffers = TX_RING_SIZE; 6277 6278 return 0; 6279 6280 err: 6281 qlnx_free_mem_txq(ha, fp, txq); 6282 return -ENOMEM; 6283 } 6284 6285 static void 6286 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6287 { 6288 struct mbuf *mp; 6289 if_t ifp = ha->ifp; 6290 6291 if (mtx_initialized(&fp->tx_mtx)) { 6292 if (fp->tx_br != NULL) { 6293 mtx_lock(&fp->tx_mtx); 6294 6295 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6296 fp->tx_pkts_freed++; 6297 m_freem(mp); 6298 } 6299 6300 mtx_unlock(&fp->tx_mtx); 6301 6302 buf_ring_free(fp->tx_br, M_DEVBUF); 6303 fp->tx_br = NULL; 6304 } 6305 mtx_destroy(&fp->tx_mtx); 6306 } 6307 return; 6308 } 6309 6310 static void 6311 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6312 { 6313 int tc; 6314 6315 qlnx_free_mem_sb(ha, fp->sb_info); 6316 6317 qlnx_free_mem_rxq(ha, fp->rxq); 6318 6319 for (tc = 0; tc < ha->num_tc; tc++) 6320 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6321 6322 return; 6323 } 6324 6325 static int 6326 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6327 { 6328 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6329 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6330 6331 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6332 6333 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6334 M_NOWAIT, &fp->tx_mtx); 6335 if (fp->tx_br == NULL) { 6336 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6337 ha->dev_unit, fp->rss_id); 6338 return -ENOMEM; 6339 } 6340 return 0; 6341 } 6342 6343 static int 6344 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6345 { 6346 int rc, tc; 6347 6348 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6349 if (rc) 6350 goto err; 6351 6352 if (ha->rx_jumbo_buf_eq_mtu) { 6353 if (ha->max_frame_size <= MCLBYTES) 6354 ha->rx_buf_size = MCLBYTES; 6355 else if (ha->max_frame_size <= MJUMPAGESIZE) 6356 ha->rx_buf_size = MJUMPAGESIZE; 6357 else if (ha->max_frame_size <= MJUM9BYTES) 6358 ha->rx_buf_size = MJUM9BYTES; 6359 else if (ha->max_frame_size <= MJUM16BYTES) 6360 ha->rx_buf_size = MJUM16BYTES; 6361 } else { 6362 if (ha->max_frame_size <= MCLBYTES) 6363 ha->rx_buf_size = MCLBYTES; 6364 else 6365 ha->rx_buf_size = MJUMPAGESIZE; 6366 } 6367 6368 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6369 if (rc) 6370 goto err; 6371 6372 for (tc = 0; tc < ha->num_tc; tc++) { 6373 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6374 if (rc) 6375 goto err; 6376 } 6377 6378 return 0; 6379 6380 err: 6381 qlnx_free_mem_fp(ha, fp); 6382 return -ENOMEM; 6383 } 6384 6385 static void 6386 qlnx_free_mem_load(qlnx_host_t *ha) 6387 { 6388 int i; 6389 6390 for (i = 0; i < ha->num_rss; i++) { 6391 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6392 6393 qlnx_free_mem_fp(ha, fp); 6394 } 6395 return; 6396 } 6397 6398 static int 6399 qlnx_alloc_mem_load(qlnx_host_t *ha) 
6400 { 6401 int rc = 0, rss_id; 6402 6403 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6404 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6405 6406 rc = qlnx_alloc_mem_fp(ha, fp); 6407 if (rc) 6408 break; 6409 } 6410 return (rc); 6411 } 6412 6413 static int 6414 qlnx_start_vport(struct ecore_dev *cdev, 6415 u8 vport_id, 6416 u16 mtu, 6417 u8 drop_ttl0_flg, 6418 u8 inner_vlan_removal_en_flg, 6419 u8 tx_switching, 6420 u8 hw_lro_enable) 6421 { 6422 int rc, i; 6423 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6424 qlnx_host_t *ha __unused; 6425 6426 ha = (qlnx_host_t *)cdev; 6427 6428 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6429 vport_start_params.tx_switching = 0; 6430 vport_start_params.handle_ptp_pkts = 0; 6431 vport_start_params.only_untagged = 0; 6432 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6433 6434 vport_start_params.tpa_mode = 6435 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6436 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6437 6438 vport_start_params.vport_id = vport_id; 6439 vport_start_params.mtu = mtu; 6440 6441 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6442 6443 for_each_hwfn(cdev, i) { 6444 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6445 6446 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6447 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6448 6449 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6450 6451 if (rc) { 6452 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6453 " with MTU %d\n" , vport_id, mtu); 6454 return -ENOMEM; 6455 } 6456 6457 ecore_hw_start_fastpath(p_hwfn); 6458 6459 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6460 vport_id, mtu); 6461 } 6462 return 0; 6463 } 6464 6465 static int 6466 qlnx_update_vport(struct ecore_dev *cdev, 6467 struct qlnx_update_vport_params *params) 6468 { 6469 struct ecore_sp_vport_update_params sp_params; 6470 int rc, i, j, fp_index; 6471 struct ecore_hwfn *p_hwfn; 6472 struct ecore_rss_params *rss; 6473 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6474 struct qlnx_fastpath *fp; 6475 6476 memset(&sp_params, 0, sizeof(sp_params)); 6477 /* Translate protocol params into sp params */ 6478 sp_params.vport_id = params->vport_id; 6479 6480 sp_params.update_vport_active_rx_flg = 6481 params->update_vport_active_rx_flg; 6482 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6483 6484 sp_params.update_vport_active_tx_flg = 6485 params->update_vport_active_tx_flg; 6486 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6487 6488 sp_params.update_inner_vlan_removal_flg = 6489 params->update_inner_vlan_removal_flg; 6490 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6491 6492 sp_params.sge_tpa_params = params->sge_tpa_params; 6493 6494 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6495 * We need to re-fix the rss values per engine for CMT. 
6496 */ 6497 if (params->rss_params->update_rss_config) 6498 sp_params.rss_params = params->rss_params; 6499 else 6500 sp_params.rss_params = NULL; 6501 6502 for_each_hwfn(cdev, i) { 6503 p_hwfn = &cdev->hwfns[i]; 6504 6505 if ((cdev->num_hwfns > 1) && 6506 params->rss_params->update_rss_config && 6507 params->rss_params->rss_enable) { 6508 rss = params->rss_params; 6509 6510 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6511 fp_index = ((cdev->num_hwfns * j) + i) % 6512 ha->num_rss; 6513 6514 fp = &ha->fp_array[fp_index]; 6515 rss->rss_ind_table[j] = fp->rxq->handle; 6516 } 6517 6518 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6519 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6520 rss->rss_ind_table[j], 6521 rss->rss_ind_table[j+1], 6522 rss->rss_ind_table[j+2], 6523 rss->rss_ind_table[j+3], 6524 rss->rss_ind_table[j+4], 6525 rss->rss_ind_table[j+5], 6526 rss->rss_ind_table[j+6], 6527 rss->rss_ind_table[j+7]); 6528 j += 8; 6529 } 6530 } 6531 6532 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6533 6534 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6535 6536 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6537 ECORE_SPQ_MODE_EBLOCK, NULL); 6538 if (rc) { 6539 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6540 return rc; 6541 } 6542 6543 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6544 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6545 params->vport_id, params->vport_active_tx_flg, 6546 params->vport_active_rx_flg, 6547 params->update_vport_active_tx_flg, 6548 params->update_vport_active_rx_flg); 6549 } 6550 6551 return 0; 6552 } 6553 6554 static void 6555 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6556 { 6557 struct eth_rx_bd *rx_bd_cons = 6558 ecore_chain_consume(&rxq->rx_bd_ring); 6559 struct eth_rx_bd *rx_bd_prod = 6560 ecore_chain_produce(&rxq->rx_bd_ring); 6561 struct sw_rx_data *sw_rx_data_cons = 6562 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6563 struct sw_rx_data *sw_rx_data_prod = 6564 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6565 6566 sw_rx_data_prod->data = sw_rx_data_cons->data; 6567 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6568 6569 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6570 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6571 6572 return; 6573 } 6574 6575 static void 6576 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6577 { 6578 6579 uint16_t bd_prod; 6580 uint16_t cqe_prod; 6581 union { 6582 struct eth_rx_prod_data rx_prod_data; 6583 uint32_t data32; 6584 } rx_prods; 6585 6586 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6587 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6588 6589 /* Update producers */ 6590 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6591 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6592 6593 /* Make sure that the BD and SGE data is updated before updating the 6594 * producers since FW might read the BD/SGE right after the producer 6595 * is updated. 6596 */ 6597 wmb(); 6598 6599 #ifdef ECORE_CONFIG_DIRECT_HWFN 6600 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6601 sizeof(rx_prods), &rx_prods.data32); 6602 #else 6603 internal_ram_wr(rxq->hw_rxq_prod_addr, 6604 sizeof(rx_prods), &rx_prods.data32); 6605 #endif 6606 6607 /* mmiowb is needed to synchronize doorbell writes from more than one 6608 * processor. It guarantees that the write arrives to the device before 6609 * the napi lock is released and another qlnx_poll is called (possibly 6610 * on another CPU). 
Without this barrier, the next doorbell can bypass 6611 * this doorbell. This is applicable to IA64/Altix systems. 6612 */ 6613 wmb(); 6614 6615 return; 6616 } 6617 6618 static uint32_t qlnx_hash_key[] = { 6619 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6620 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6621 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6622 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6623 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6624 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6625 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6626 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6627 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6628 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6629 6630 static int 6631 qlnx_start_queues(qlnx_host_t *ha) 6632 { 6633 int rc, tc, i, vport_id = 0, 6634 drop_ttl0_flg = 1, vlan_removal_en = 1, 6635 tx_switching = 0, hw_lro_enable = 0; 6636 struct ecore_dev *cdev = &ha->cdev; 6637 struct ecore_rss_params *rss_params = &ha->rss_params; 6638 struct qlnx_update_vport_params vport_update_params; 6639 if_t ifp; 6640 struct ecore_hwfn *p_hwfn; 6641 struct ecore_sge_tpa_params tpa_params; 6642 struct ecore_queue_start_common_params qparams; 6643 struct qlnx_fastpath *fp; 6644 6645 ifp = ha->ifp; 6646 6647 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6648 6649 if (!ha->num_rss) { 6650 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6651 " are no Rx queues\n"); 6652 return -EINVAL; 6653 } 6654 6655 #ifndef QLNX_SOFT_LRO 6656 hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO; 6657 #endif /* #ifndef QLNX_SOFT_LRO */ 6658 6659 rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg, 6660 vlan_removal_en, tx_switching, hw_lro_enable); 6661 6662 if (rc) { 6663 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6664 return rc; 6665 } 6666 6667 QL_DPRINT2(ha, "Start vport ramrod passed, " 6668 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6669 vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en); 6670 6671 for_each_rss(i) { 6672 struct ecore_rxq_start_ret_params rx_ret_params; 6673 struct ecore_txq_start_ret_params tx_ret_params; 6674 6675 fp = &ha->fp_array[i]; 6676 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6677 6678 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6679 bzero(&rx_ret_params, 6680 sizeof (struct ecore_rxq_start_ret_params)); 6681 6682 qparams.queue_id = i ; 6683 qparams.vport_id = vport_id; 6684 qparams.stats_id = vport_id; 6685 qparams.p_sb = fp->sb_info; 6686 qparams.sb_idx = RX_PI; 6687 6688 6689 rc = ecore_eth_rx_queue_start(p_hwfn, 6690 p_hwfn->hw_info.opaque_fid, 6691 &qparams, 6692 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6693 /* bd_chain_phys_addr */ 6694 fp->rxq->rx_bd_ring.p_phys_addr, 6695 /* cqe_pbl_addr */ 6696 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6697 /* cqe_pbl_size */ 6698 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6699 &rx_ret_params); 6700 6701 if (rc) { 6702 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6703 return rc; 6704 } 6705 6706 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6707 fp->rxq->handle = rx_ret_params.p_handle; 6708 fp->rxq->hw_cons_ptr = 6709 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6710 6711 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6712 6713 for (tc = 0; tc < ha->num_tc; tc++) { 6714 struct qlnx_tx_queue *txq = fp->txq[tc]; 6715 6716 bzero(&qparams, 6717 sizeof(struct ecore_queue_start_common_params)); 6718 bzero(&tx_ret_params, 6719 sizeof (struct ecore_txq_start_ret_params)); 6720 6721 qparams.queue_id = 
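/* relative (per-engine) Tx queue id; queues appear to be striped across hwfns, hence the divide by num_hwfns */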
txq->index / cdev->num_hwfns ; 6722 qparams.vport_id = vport_id; 6723 qparams.stats_id = vport_id; 6724 qparams.p_sb = fp->sb_info; 6725 qparams.sb_idx = TX_PI(tc); 6726 6727 rc = ecore_eth_tx_queue_start(p_hwfn, 6728 p_hwfn->hw_info.opaque_fid, 6729 &qparams, tc, 6730 /* bd_chain_phys_addr */ 6731 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6732 ecore_chain_get_page_cnt(&txq->tx_pbl), 6733 &tx_ret_params); 6734 6735 if (rc) { 6736 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6737 txq->index, rc); 6738 return rc; 6739 } 6740 6741 txq->doorbell_addr = tx_ret_params.p_doorbell; 6742 txq->handle = tx_ret_params.p_handle; 6743 6744 txq->hw_cons_ptr = 6745 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6746 SET_FIELD(txq->tx_db.data.params, 6747 ETH_DB_DATA_DEST, DB_DEST_XCM); 6748 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6749 DB_AGG_CMD_SET); 6750 SET_FIELD(txq->tx_db.data.params, 6751 ETH_DB_DATA_AGG_VAL_SEL, 6752 DQ_XCM_ETH_TX_BD_PROD_CMD); 6753 6754 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6755 } 6756 } 6757 6758 /* Fill struct with RSS params */ 6759 if (ha->num_rss > 1) { 6760 rss_params->update_rss_config = 1; 6761 rss_params->rss_enable = 1; 6762 rss_params->update_rss_capabilities = 1; 6763 rss_params->update_rss_ind_table = 1; 6764 rss_params->update_rss_key = 1; 6765 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6766 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6767 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6768 6769 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6770 fp = &ha->fp_array[(i % ha->num_rss)]; 6771 rss_params->rss_ind_table[i] = fp->rxq->handle; 6772 } 6773 6774 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6775 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6776 6777 } else { 6778 memset(rss_params, 0, sizeof(*rss_params)); 6779 } 6780 6781 /* Prepare and send the vport enable */ 6782 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6783 vport_update_params.vport_id = vport_id; 6784 vport_update_params.update_vport_active_tx_flg = 1; 6785 vport_update_params.vport_active_tx_flg = 1; 6786 vport_update_params.update_vport_active_rx_flg = 1; 6787 vport_update_params.vport_active_rx_flg = 1; 6788 vport_update_params.rss_params = rss_params; 6789 vport_update_params.update_inner_vlan_removal_flg = 1; 6790 vport_update_params.inner_vlan_removal_flg = 1; 6791 6792 if (hw_lro_enable) { 6793 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6794 6795 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6796 6797 tpa_params.update_tpa_en_flg = 1; 6798 tpa_params.tpa_ipv4_en_flg = 1; 6799 tpa_params.tpa_ipv6_en_flg = 1; 6800 6801 tpa_params.update_tpa_param_flg = 1; 6802 tpa_params.tpa_pkt_split_flg = 0; 6803 tpa_params.tpa_hdr_data_split_flg = 0; 6804 tpa_params.tpa_gro_consistent_flg = 0; 6805 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6806 tpa_params.tpa_max_size = (uint16_t)(-1); 6807 tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2; 6808 tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2; 6809 6810 vport_update_params.sge_tpa_params = &tpa_params; 6811 } 6812 6813 rc = qlnx_update_vport(cdev, &vport_update_params); 6814 if (rc) { 6815 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6816 return rc; 6817 } 6818 6819 return 0; 6820 } 6821 6822 static int 6823 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6824 struct qlnx_tx_queue *txq) 6825 { 6826 uint16_t hw_bd_cons; 6827 uint16_t ecore_cons_idx; 6828 6829 QL_DPRINT2(ha, "enter\n"); 6830 6831 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 
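/*
 * Drain loop: poll until the consumer index the firmware reports in the
 * status block (hw_bd_cons, little-endian) catches up with the local
 * chain consumer index.  Each pass reaps completed Tx buffers through
 * qlnx_tx_int() under the per-fastpath tx_mtx and then waits ~2ms before
 * re-reading the hardware consumer.
 */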
6832 6833 while (hw_bd_cons != 6834 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6835 mtx_lock(&fp->tx_mtx); 6836 6837 (void)qlnx_tx_int(ha, fp, txq); 6838 6839 mtx_unlock(&fp->tx_mtx); 6840 6841 qlnx_mdelay(__func__, 2); 6842 6843 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6844 } 6845 6846 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6847 6848 return 0; 6849 } 6850 6851 static int 6852 qlnx_stop_queues(qlnx_host_t *ha) 6853 { 6854 struct qlnx_update_vport_params vport_update_params; 6855 struct ecore_dev *cdev; 6856 struct qlnx_fastpath *fp; 6857 int rc, tc, i; 6858 6859 cdev = &ha->cdev; 6860 6861 /* Disable the vport */ 6862 6863 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6864 6865 vport_update_params.vport_id = 0; 6866 vport_update_params.update_vport_active_tx_flg = 1; 6867 vport_update_params.vport_active_tx_flg = 0; 6868 vport_update_params.update_vport_active_rx_flg = 1; 6869 vport_update_params.vport_active_rx_flg = 0; 6870 vport_update_params.rss_params = &ha->rss_params; 6871 vport_update_params.rss_params->update_rss_config = 0; 6872 vport_update_params.rss_params->rss_enable = 0; 6873 vport_update_params.update_inner_vlan_removal_flg = 0; 6874 vport_update_params.inner_vlan_removal_flg = 0; 6875 6876 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6877 6878 rc = qlnx_update_vport(cdev, &vport_update_params); 6879 if (rc) { 6880 QL_DPRINT1(ha, "Failed to update vport\n"); 6881 return rc; 6882 } 6883 6884 /* Flush Tx queues. If needed, request drain from MCP */ 6885 for_each_rss(i) { 6886 fp = &ha->fp_array[i]; 6887 6888 for (tc = 0; tc < ha->num_tc; tc++) { 6889 struct qlnx_tx_queue *txq = fp->txq[tc]; 6890 6891 rc = qlnx_drain_txq(ha, fp, txq); 6892 if (rc) 6893 return rc; 6894 } 6895 } 6896 6897 /* Stop all Queues in reverse order*/ 6898 for (i = ha->num_rss - 1; i >= 0; i--) { 6899 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6900 6901 fp = &ha->fp_array[i]; 6902 6903 /* Stop the Tx Queue(s)*/ 6904 for (tc = 0; tc < ha->num_tc; tc++) { 6905 int tx_queue_id __unused; 6906 6907 tx_queue_id = tc * ha->num_rss + i; 6908 rc = ecore_eth_tx_queue_stop(p_hwfn, 6909 fp->txq[tc]->handle); 6910 6911 if (rc) { 6912 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 6913 tx_queue_id); 6914 return rc; 6915 } 6916 } 6917 6918 /* Stop the Rx Queue*/ 6919 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6920 false); 6921 if (rc) { 6922 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 6923 return rc; 6924 } 6925 } 6926 6927 /* Stop the vport */ 6928 for_each_hwfn(cdev, i) { 6929 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6930 6931 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6932 6933 if (rc) { 6934 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 6935 return rc; 6936 } 6937 } 6938 6939 return rc; 6940 } 6941 6942 static int 6943 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6944 enum ecore_filter_opcode opcode, 6945 unsigned char mac[ETH_ALEN]) 6946 { 6947 struct ecore_filter_ucast ucast; 6948 struct ecore_dev *cdev; 6949 int rc; 6950 6951 cdev = &ha->cdev; 6952 6953 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6954 6955 ucast.opcode = opcode; 6956 ucast.type = ECORE_FILTER_MAC; 6957 ucast.is_rx_filter = 1; 6958 ucast.vport_to_add_to = 0; 6959 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6960 6961 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6962 6963 return (rc); 6964 } 6965 6966 static int 6967 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6968 { 6969 struct 
ecore_filter_ucast ucast; 6970 struct ecore_dev *cdev; 6971 int rc; 6972 6973 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6974 6975 ucast.opcode = ECORE_FILTER_REPLACE; 6976 ucast.type = ECORE_FILTER_MAC; 6977 ucast.is_rx_filter = 1; 6978 6979 cdev = &ha->cdev; 6980 6981 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6982 6983 return (rc); 6984 } 6985 6986 static int 6987 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6988 { 6989 struct ecore_filter_mcast *mcast; 6990 struct ecore_dev *cdev; 6991 int rc, i; 6992 6993 cdev = &ha->cdev; 6994 6995 mcast = &ha->ecore_mcast; 6996 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6997 6998 mcast->opcode = ECORE_FILTER_REMOVE; 6999 7000 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 7001 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7002 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7003 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7004 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7005 mcast->num_mc_addrs++; 7006 } 7007 } 7008 mcast = &ha->ecore_mcast; 7009 7010 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7011 7012 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7013 ha->nmcast = 0; 7014 7015 return (rc); 7016 } 7017 7018 static int 7019 qlnx_clean_filters(qlnx_host_t *ha) 7020 { 7021 int rc = 0; 7022 7023 /* Remove all unicast macs */ 7024 rc = qlnx_remove_all_ucast_mac(ha); 7025 if (rc) 7026 return rc; 7027 7028 /* Remove all multicast macs */ 7029 rc = qlnx_remove_all_mcast_mac(ha); 7030 if (rc) 7031 return rc; 7032 7033 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7034 7035 return (rc); 7036 } 7037 7038 static int 7039 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7040 { 7041 struct ecore_filter_accept_flags accept; 7042 int rc = 0; 7043 struct ecore_dev *cdev; 7044 7045 cdev = &ha->cdev; 7046 7047 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7048 7049 accept.update_rx_mode_config = 1; 7050 accept.rx_accept_filter = filter; 7051 7052 accept.update_tx_mode_config = 1; 7053 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7054 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7055 7056 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7057 ECORE_SPQ_MODE_CB, NULL); 7058 7059 return (rc); 7060 } 7061 7062 static int 7063 qlnx_set_rx_mode(qlnx_host_t *ha) 7064 { 7065 int rc = 0; 7066 uint8_t filter; 7067 7068 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7069 if (rc) 7070 return rc; 7071 7072 rc = qlnx_remove_all_mcast_mac(ha); 7073 if (rc) 7074 return rc; 7075 7076 filter = ECORE_ACCEPT_UCAST_MATCHED | 7077 ECORE_ACCEPT_MCAST_MATCHED | 7078 ECORE_ACCEPT_BCAST; 7079 7080 if (qlnx_vf_device(ha) == 0) { 7081 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7082 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7083 } 7084 ha->filter = filter; 7085 7086 rc = qlnx_set_rx_accept_filter(ha, filter); 7087 7088 return (rc); 7089 } 7090 7091 static int 7092 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7093 { 7094 int i, rc = 0; 7095 struct ecore_dev *cdev; 7096 struct ecore_hwfn *hwfn; 7097 struct ecore_ptt *ptt; 7098 7099 if (qlnx_vf_device(ha) == 0) 7100 return (0); 7101 7102 cdev = &ha->cdev; 7103 7104 for_each_hwfn(cdev, i) { 7105 hwfn = &cdev->hwfns[i]; 7106 7107 ptt = ecore_ptt_acquire(hwfn); 7108 if (!ptt) 7109 return -EBUSY; 7110 7111 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7112 7113 ecore_ptt_release(hwfn, ptt); 7114 7115 if (rc) 7116 return rc; 7117 } 7118 return (rc); 
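/*
 * Note on the loop above: MFW link requests are issued per hardware
 * function.  Each iteration acquires a PTT window (bailing out with
 * -EBUSY if none is available), asks the management firmware to bring
 * the link up or down via ecore_mcp_set_link(), and releases the window
 * before moving to the next hwfn.
 */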
7119 } 7120 7121 static uint64_t 7122 qlnx_get_counter(if_t ifp, ift_counter cnt) 7123 { 7124 qlnx_host_t *ha; 7125 uint64_t count; 7126 7127 ha = (qlnx_host_t *)if_getsoftc(ifp); 7128 7129 switch (cnt) { 7130 case IFCOUNTER_IPACKETS: 7131 count = ha->hw_stats.common.rx_ucast_pkts + 7132 ha->hw_stats.common.rx_mcast_pkts + 7133 ha->hw_stats.common.rx_bcast_pkts; 7134 break; 7135 7136 case IFCOUNTER_IERRORS: 7137 count = ha->hw_stats.common.rx_crc_errors + 7138 ha->hw_stats.common.rx_align_errors + 7139 ha->hw_stats.common.rx_oversize_packets + 7140 ha->hw_stats.common.rx_undersize_packets; 7141 break; 7142 7143 case IFCOUNTER_OPACKETS: 7144 count = ha->hw_stats.common.tx_ucast_pkts + 7145 ha->hw_stats.common.tx_mcast_pkts + 7146 ha->hw_stats.common.tx_bcast_pkts; 7147 break; 7148 7149 case IFCOUNTER_OERRORS: 7150 count = ha->hw_stats.common.tx_err_drop_pkts; 7151 break; 7152 7153 case IFCOUNTER_COLLISIONS: 7154 return (0); 7155 7156 case IFCOUNTER_IBYTES: 7157 count = ha->hw_stats.common.rx_ucast_bytes + 7158 ha->hw_stats.common.rx_mcast_bytes + 7159 ha->hw_stats.common.rx_bcast_bytes; 7160 break; 7161 7162 case IFCOUNTER_OBYTES: 7163 count = ha->hw_stats.common.tx_ucast_bytes + 7164 ha->hw_stats.common.tx_mcast_bytes + 7165 ha->hw_stats.common.tx_bcast_bytes; 7166 break; 7167 7168 case IFCOUNTER_IMCASTS: 7169 count = ha->hw_stats.common.rx_mcast_pkts; 7170 break; 7171 7172 case IFCOUNTER_OMCASTS: 7173 count = ha->hw_stats.common.tx_mcast_pkts; 7174 break; 7175 7176 case IFCOUNTER_IQDROPS: 7177 case IFCOUNTER_OQDROPS: 7178 case IFCOUNTER_NOPROTO: 7179 7180 default: 7181 return (if_get_counter_default(ifp, cnt)); 7182 } 7183 return (count); 7184 } 7185 7186 static void 7187 qlnx_timer(void *arg) 7188 { 7189 qlnx_host_t *ha; 7190 7191 ha = (qlnx_host_t *)arg; 7192 7193 if (ha->error_recovery) { 7194 ha->error_recovery = 0; 7195 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7196 return; 7197 } 7198 7199 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7200 7201 if (ha->storm_stats_gather) 7202 qlnx_sample_storm_stats(ha); 7203 7204 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7205 7206 return; 7207 } 7208 7209 static int 7210 qlnx_load(qlnx_host_t *ha) 7211 { 7212 int i; 7213 int rc = 0; 7214 device_t dev; 7215 7216 dev = ha->pci_dev; 7217 7218 QL_DPRINT2(ha, "enter\n"); 7219 7220 rc = qlnx_alloc_mem_arrays(ha); 7221 if (rc) 7222 goto qlnx_load_exit0; 7223 7224 qlnx_init_fp(ha); 7225 7226 rc = qlnx_alloc_mem_load(ha); 7227 if (rc) 7228 goto qlnx_load_exit1; 7229 7230 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7231 ha->num_rss, ha->num_tc); 7232 7233 for (i = 0; i < ha->num_rss; i++) { 7234 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7235 (INTR_TYPE_NET | INTR_MPSAFE), 7236 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7237 &ha->irq_vec[i].handle))) { 7238 QL_DPRINT1(ha, "could not setup interrupt\n"); 7239 goto qlnx_load_exit2; 7240 } 7241 7242 QL_DPRINT2(ha, "rss_id = %d irq_rid %d " 7243 "irq %p handle %p\n", i, 7244 ha->irq_vec[i].irq_rid, 7245 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7246 7247 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7248 } 7249 7250 rc = qlnx_start_queues(ha); 7251 if (rc) 7252 goto qlnx_load_exit2; 7253 7254 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); 7255 7256 /* Add primary MAC and set Rx filters */ 7257 rc = qlnx_set_rx_mode(ha); 7258 if (rc) 7259 goto qlnx_load_exit2; 7260 7261 /* Ask for link-up using current configuration */ 7262 qlnx_set_link(ha, true); 7263 7264 if (qlnx_vf_device(ha) == 0) 7265
qlnx_link_update(&ha->cdev.hwfns[0]); 7266 7267 ha->state = QLNX_STATE_OPEN; 7268 7269 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7270 7271 if (ha->flags.callout_init) 7272 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7273 7274 goto qlnx_load_exit0; 7275 7276 qlnx_load_exit2: 7277 qlnx_free_mem_load(ha); 7278 7279 qlnx_load_exit1: 7280 ha->num_rss = 0; 7281 7282 qlnx_load_exit0: 7283 QL_DPRINT2(ha, "exit [%d]\n", rc); 7284 return rc; 7285 } 7286 7287 static void 7288 qlnx_drain_soft_lro(qlnx_host_t *ha) 7289 { 7290 #ifdef QLNX_SOFT_LRO 7291 7292 if_t ifp; 7293 int i; 7294 7295 ifp = ha->ifp; 7296 7297 if (if_getcapenable(ifp) & IFCAP_LRO) { 7298 for (i = 0; i < ha->num_rss; i++) { 7299 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7300 struct lro_ctrl *lro; 7301 7302 lro = &fp->rxq->lro; 7303 7304 tcp_lro_flush_all(lro); 7305 } 7306 } 7307 7308 #endif /* #ifdef QLNX_SOFT_LRO */ 7309 7310 return; 7311 } 7312 7313 static void 7314 qlnx_unload(qlnx_host_t *ha) 7315 { 7316 struct ecore_dev *cdev; 7317 device_t dev; 7318 int i; 7319 7320 cdev = &ha->cdev; 7321 dev = ha->pci_dev; 7322 7323 QL_DPRINT2(ha, "enter\n"); 7324 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7325 7326 if (ha->state == QLNX_STATE_OPEN) { 7327 qlnx_set_link(ha, false); 7328 qlnx_clean_filters(ha); 7329 qlnx_stop_queues(ha); 7330 ecore_hw_stop_fastpath(cdev); 7331 7332 for (i = 0; i < ha->num_rss; i++) { 7333 if (ha->irq_vec[i].handle) { 7334 (void)bus_teardown_intr(dev, 7335 ha->irq_vec[i].irq, 7336 ha->irq_vec[i].handle); 7337 ha->irq_vec[i].handle = NULL; 7338 } 7339 } 7340 7341 qlnx_drain_fp_taskqueues(ha); 7342 qlnx_drain_soft_lro(ha); 7343 qlnx_free_mem_load(ha); 7344 } 7345 7346 if (ha->flags.callout_init) 7347 callout_drain(&ha->qlnx_callout); 7348 7349 qlnx_mdelay(__func__, 1000); 7350 7351 ha->state = QLNX_STATE_CLOSED; 7352 7353 QL_DPRINT2(ha, "exit\n"); 7354 return; 7355 } 7356 7357 static int 7358 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7359 { 7360 int rval = -1; 7361 struct ecore_hwfn *p_hwfn; 7362 struct ecore_ptt *p_ptt; 7363 7364 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7365 7366 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7367 p_ptt = ecore_ptt_acquire(p_hwfn); 7368 7369 if (!p_ptt) { 7370 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7371 return (rval); 7372 } 7373 7374 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7375 7376 if (rval == DBG_STATUS_OK) 7377 rval = 0; 7378 else { 7379 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7380 "[0x%x]\n", rval); 7381 } 7382 7383 ecore_ptt_release(p_hwfn, p_ptt); 7384 7385 return (rval); 7386 } 7387 7388 static int 7389 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7390 { 7391 int rval = -1; 7392 struct ecore_hwfn *p_hwfn; 7393 struct ecore_ptt *p_ptt; 7394 7395 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7396 7397 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7398 p_ptt = ecore_ptt_acquire(p_hwfn); 7399 7400 if (!p_ptt) { 7401 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7402 return (rval); 7403 } 7404 7405 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7406 7407 if (rval == DBG_STATUS_OK) 7408 rval = 0; 7409 else { 7410 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7411 " [0x%x]\n", rval); 7412 } 7413 7414 ecore_ptt_release(p_hwfn, p_ptt); 7415 7416 return (rval); 7417 } 7418 7419 static void 7420 qlnx_sample_storm_stats(qlnx_host_t *ha) 7421 { 7422 int i, index; 7423 struct ecore_dev *cdev; 7424 
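/*
 * Storm statistics sampling (descriptive note): while storm_stats_gather
 * is set, each timer tick records one sample per hwfn.  For every storm
 * processor (X/Y/P/T/M/U) the active, stall, sleeping and inactive cycle
 * counters are read from SEM fast memory through a PTT window and stored
 * at storm_stats[storm_stats_index + (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN)];
 * gathering stops automatically once QLNX_STORM_STATS_SAMPLES_PER_HWFN
 * samples have been collected.
 */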
qlnx_storm_stats_t *s_stats; 7425 uint32_t reg; 7426 struct ecore_ptt *p_ptt; 7427 struct ecore_hwfn *hwfn; 7428 7429 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7430 ha->storm_stats_gather = 0; 7431 return; 7432 } 7433 7434 cdev = &ha->cdev; 7435 7436 for_each_hwfn(cdev, i) { 7437 hwfn = &cdev->hwfns[i]; 7438 7439 p_ptt = ecore_ptt_acquire(hwfn); 7440 if (!p_ptt) 7441 return; 7442 7443 index = ha->storm_stats_index + 7444 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7445 7446 s_stats = &ha->storm_stats[index]; 7447 7448 /* XSTORM */ 7449 reg = XSEM_REG_FAST_MEMORY + 7450 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7451 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7452 7453 reg = XSEM_REG_FAST_MEMORY + 7454 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7455 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7456 7457 reg = XSEM_REG_FAST_MEMORY + 7458 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7459 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7460 7461 reg = XSEM_REG_FAST_MEMORY + 7462 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7463 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7464 7465 /* YSTORM */ 7466 reg = YSEM_REG_FAST_MEMORY + 7467 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7468 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7469 7470 reg = YSEM_REG_FAST_MEMORY + 7471 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7472 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7473 7474 reg = YSEM_REG_FAST_MEMORY + 7475 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7476 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7477 7478 reg = YSEM_REG_FAST_MEMORY + 7479 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7480 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7481 7482 /* PSTORM */ 7483 reg = PSEM_REG_FAST_MEMORY + 7484 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7485 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7486 7487 reg = PSEM_REG_FAST_MEMORY + 7488 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7489 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7490 7491 reg = PSEM_REG_FAST_MEMORY + 7492 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7493 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7494 7495 reg = PSEM_REG_FAST_MEMORY + 7496 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7497 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7498 7499 /* TSTORM */ 7500 reg = TSEM_REG_FAST_MEMORY + 7501 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7502 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7503 7504 reg = TSEM_REG_FAST_MEMORY + 7505 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7506 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7507 7508 reg = TSEM_REG_FAST_MEMORY + 7509 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7510 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7511 7512 reg = TSEM_REG_FAST_MEMORY + 7513 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7514 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7515 7516 /* MSTORM */ 7517 reg = MSEM_REG_FAST_MEMORY + 7518 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7519 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7520 7521 reg = MSEM_REG_FAST_MEMORY + 7522 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7523 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7524 7525 reg = MSEM_REG_FAST_MEMORY + 7526 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7527 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7528 7529 reg = MSEM_REG_FAST_MEMORY + 7530 
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7531 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7532 7533 /* USTORM */ 7534 reg = USEM_REG_FAST_MEMORY + 7535 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7536 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7537 7538 reg = USEM_REG_FAST_MEMORY + 7539 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7540 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7541 7542 reg = USEM_REG_FAST_MEMORY + 7543 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7544 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7545 7546 reg = USEM_REG_FAST_MEMORY + 7547 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7548 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7549 7550 ecore_ptt_release(hwfn, p_ptt); 7551 } 7552 7553 ha->storm_stats_index++; 7554 7555 return; 7556 } 7557 7558 /* 7559 * Name: qlnx_dump_buf8 7560 * Function: dumps a buffer as bytes 7561 */ 7562 static void 7563 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7564 { 7565 device_t dev; 7566 uint32_t i = 0; 7567 uint8_t *buf; 7568 7569 dev = ha->pci_dev; 7570 buf = dbuf; 7571 7572 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7573 7574 while (len >= 16) { 7575 device_printf(dev,"0x%08x:" 7576 " %02x %02x %02x %02x %02x %02x %02x %02x" 7577 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7578 buf[0], buf[1], buf[2], buf[3], 7579 buf[4], buf[5], buf[6], buf[7], 7580 buf[8], buf[9], buf[10], buf[11], 7581 buf[12], buf[13], buf[14], buf[15]); 7582 i += 16; 7583 len -= 16; 7584 buf += 16; 7585 } 7586 switch (len) { 7587 case 1: 7588 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7589 break; 7590 case 2: 7591 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7592 break; 7593 case 3: 7594 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7595 i, buf[0], buf[1], buf[2]); 7596 break; 7597 case 4: 7598 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7599 buf[0], buf[1], buf[2], buf[3]); 7600 break; 7601 case 5: 7602 device_printf(dev,"0x%08x:" 7603 " %02x %02x %02x %02x %02x\n", i, 7604 buf[0], buf[1], buf[2], buf[3], buf[4]); 7605 break; 7606 case 6: 7607 device_printf(dev,"0x%08x:" 7608 " %02x %02x %02x %02x %02x %02x\n", i, 7609 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7610 break; 7611 case 7: 7612 device_printf(dev,"0x%08x:" 7613 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7614 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7615 break; 7616 case 8: 7617 device_printf(dev,"0x%08x:" 7618 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7619 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7620 buf[7]); 7621 break; 7622 case 9: 7623 device_printf(dev,"0x%08x:" 7624 " %02x %02x %02x %02x %02x %02x %02x %02x" 7625 " %02x\n", i, 7626 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7627 buf[7], buf[8]); 7628 break; 7629 case 10: 7630 device_printf(dev,"0x%08x:" 7631 " %02x %02x %02x %02x %02x %02x %02x %02x" 7632 " %02x %02x\n", i, 7633 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7634 buf[7], buf[8], buf[9]); 7635 break; 7636 case 11: 7637 device_printf(dev,"0x%08x:" 7638 " %02x %02x %02x %02x %02x %02x %02x %02x" 7639 " %02x %02x %02x\n", i, 7640 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7641 buf[7], buf[8], buf[9], buf[10]); 7642 break; 7643 case 12: 7644 device_printf(dev,"0x%08x:" 7645 " %02x %02x %02x %02x %02x %02x %02x %02x" 7646 " %02x %02x %02x %02x\n", i, 7647 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7648 buf[7], buf[8], buf[9], buf[10], 
buf[11]); 7649 break; 7650 case 13: 7651 device_printf(dev,"0x%08x:" 7652 " %02x %02x %02x %02x %02x %02x %02x %02x" 7653 " %02x %02x %02x %02x %02x\n", i, 7654 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7655 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7656 break; 7657 case 14: 7658 device_printf(dev,"0x%08x:" 7659 " %02x %02x %02x %02x %02x %02x %02x %02x" 7660 " %02x %02x %02x %02x %02x %02x\n", i, 7661 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7662 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7663 buf[13]); 7664 break; 7665 case 15: 7666 device_printf(dev,"0x%08x:" 7667 " %02x %02x %02x %02x %02x %02x %02x %02x" 7668 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7669 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7670 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7671 buf[13], buf[14]); 7672 break; 7673 default: 7674 break; 7675 } 7676 7677 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7678 7679 return; 7680 } 7681 7682 #ifdef CONFIG_ECORE_SRIOV 7683 7684 static void 7685 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7686 { 7687 struct ecore_public_vf_info *vf_info; 7688 7689 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7690 7691 if (!vf_info) 7692 return; 7693 7694 /* Clear the VF mac */ 7695 memset(vf_info->forced_mac, 0, ETH_ALEN); 7696 7697 vf_info->forced_vlan = 0; 7698 7699 return; 7700 } 7701 7702 void 7703 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7704 { 7705 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7706 return; 7707 } 7708 7709 static int 7710 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7711 struct ecore_filter_ucast *params) 7712 { 7713 struct ecore_public_vf_info *vf; 7714 7715 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 7716 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 7717 "VF[%d] vport not initialized\n", vfid); 7718 return ECORE_INVAL; 7719 } 7720 7721 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 7722 if (!vf) 7723 return -EINVAL; 7724 7725 /* No real decision to make; Store the configured MAC */ 7726 if (params->type == ECORE_FILTER_MAC || 7727 params->type == ECORE_FILTER_MAC_VLAN) 7728 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 7729 7730 return 0; 7731 } 7732 7733 int 7734 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 7735 { 7736 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 7737 } 7738 7739 static int 7740 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 7741 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 7742 { 7743 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 7744 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 7745 "VF[%d] vport not initialized\n", vfid); 7746 return ECORE_INVAL; 7747 } 7748 7749 /* Untrusted VFs can't even be trusted to know that fact. 7750 * Simply indicate everything is configured fine, and trace 7751 * configuration 'behind their back'. 
7752 */ 7753 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) 7754 return 0; 7755 7756 return 0; 7757 7758 } 7759 int 7760 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) 7761 { 7762 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); 7763 } 7764 7765 static int 7766 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) 7767 { 7768 int i; 7769 struct ecore_dev *cdev; 7770 7771 cdev = p_hwfn->p_dev; 7772 7773 for (i = 0; i < cdev->num_hwfns; i++) { 7774 if (&cdev->hwfns[i] == p_hwfn) 7775 break; 7776 } 7777 7778 if (i >= cdev->num_hwfns) 7779 return (-1); 7780 7781 return (i); 7782 } 7783 7784 static int 7785 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) 7786 { 7787 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7788 int i; 7789 7790 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", 7791 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); 7792 7793 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7794 return (-1); 7795 7796 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7797 atomic_testandset_32(&ha->sriov_task[i].flags, 7798 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); 7799 7800 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7801 &ha->sriov_task[i].pf_task); 7802 } 7803 7804 return (ECORE_SUCCESS); 7805 } 7806 7807 int 7808 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) 7809 { 7810 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); 7811 } 7812 7813 static void 7814 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) 7815 { 7816 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7817 int i; 7818 7819 if (!ha->sriov_initialized) 7820 return; 7821 7822 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7823 ha, p_hwfn->p_dev, p_hwfn); 7824 7825 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7826 return; 7827 7828 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7829 atomic_testandset_32(&ha->sriov_task[i].flags, 7830 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); 7831 7832 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7833 &ha->sriov_task[i].pf_task); 7834 } 7835 7836 return; 7837 } 7838 7839 void 7840 qlnx_vf_flr_update(void *p_hwfn) 7841 { 7842 __qlnx_vf_flr_update(p_hwfn); 7843 7844 return; 7845 } 7846 7847 #ifndef QLNX_VF 7848 7849 static void 7850 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) 7851 { 7852 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7853 int i; 7854 7855 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7856 ha, p_hwfn->p_dev, p_hwfn); 7857 7858 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7859 return; 7860 7861 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", 7862 ha, p_hwfn->p_dev, p_hwfn, i); 7863 7864 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7865 atomic_testandset_32(&ha->sriov_task[i].flags, 7866 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); 7867 7868 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7869 &ha->sriov_task[i].pf_task); 7870 } 7871 } 7872 7873 static void 7874 qlnx_initialize_sriov(qlnx_host_t *ha) 7875 { 7876 device_t dev; 7877 nvlist_t *pf_schema, *vf_schema; 7878 int iov_error; 7879 7880 dev = ha->pci_dev; 7881 7882 pf_schema = pci_iov_schema_alloc_node(); 7883 vf_schema = pci_iov_schema_alloc_node(); 7884 7885 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 7886 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 7887 IOV_SCHEMA_HASDEFAULT, FALSE); 7888 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 7889 IOV_SCHEMA_HASDEFAULT, FALSE); 7890 pci_iov_schema_add_uint16(vf_schema, "num-queues", 7891 IOV_SCHEMA_HASDEFAULT, 1); 7892 7893 iov_error = pci_iov_attach(dev, pf_schema, 
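/* hand both schemas to the pci_iov(9) framework; a later iovctl(8) configure request is what invokes qlnx_iov_init()/qlnx_iov_add_vf() */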
vf_schema); 7894 7895 if (iov_error != 0) { 7896 ha->sriov_initialized = 0; 7897 } else { 7898 device_printf(dev, "SRIOV initialized\n"); 7899 ha->sriov_initialized = 1; 7900 } 7901 7902 return; 7903 } 7904 7905 static void 7906 qlnx_sriov_disable(qlnx_host_t *ha) 7907 { 7908 struct ecore_dev *cdev; 7909 int i, j; 7910 7911 cdev = &ha->cdev; 7912 7913 ecore_iov_set_vfs_to_disable(cdev, true); 7914 7915 for_each_hwfn(cdev, i) { 7916 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 7917 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 7918 7919 if (!ptt) { 7920 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 7921 return; 7922 } 7923 /* Clean WFQ db and configure equal weight for all vports */ 7924 ecore_clean_wfq_db(hwfn, ptt); 7925 7926 ecore_for_each_vf(hwfn, j) { 7927 int k = 0; 7928 7929 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 7930 continue; 7931 7932 if (ecore_iov_is_vf_started(hwfn, j)) { 7933 /* Wait until VF is disabled before releasing */ 7934 7935 for (k = 0; k < 100; k++) { 7936 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 7937 qlnx_mdelay(__func__, 10); 7938 } else 7939 break; 7940 } 7941 } 7942 7943 if (k < 100) 7944 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 7945 ptt, j); 7946 else { 7947 QL_DPRINT1(ha, 7948 "Timeout waiting for VF's FLR to end\n"); 7949 } 7950 } 7951 ecore_ptt_release(hwfn, ptt); 7952 } 7953 7954 ecore_iov_set_vfs_to_disable(cdev, false); 7955 7956 return; 7957 } 7958 7959 static void 7960 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 7961 struct ecore_iov_vf_init_params *params) 7962 { 7963 u16 base, i; 7964 7965 /* Since we have an equal resource distribution per-VF, and we assume 7966 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 7967 * sequentially from there. 7968 */ 7969 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; 7970 7971 params->rel_vf_id = vfid; 7972 7973 for (i = 0; i < params->num_queues; i++) { 7974 params->req_rx_queue[i] = base + i; 7975 params->req_tx_queue[i] = base + i; 7976 } 7977 7978 /* PF uses indices 0 for itself; Set vport/RSS afterwards */ 7979 params->vport_id = vfid + 1; 7980 params->rss_eng_id = vfid + 1; 7981 7982 return; 7983 } 7984 7985 static int 7986 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) 7987 { 7988 qlnx_host_t *ha; 7989 struct ecore_dev *cdev; 7990 struct ecore_iov_vf_init_params params; 7991 int ret, j, i; 7992 uint32_t max_vfs; 7993 7994 if ((ha = device_get_softc(dev)) == NULL) { 7995 device_printf(dev, "%s: cannot get softc\n", __func__); 7996 return (-1); 7997 } 7998 7999 if (qlnx_create_pf_taskqueues(ha) != 0) 8000 goto qlnx_iov_init_err0; 8001 8002 cdev = &ha->cdev; 8003 8004 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); 8005 8006 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", 8007 dev, num_vfs, max_vfs); 8008 8009 if (num_vfs >= max_vfs) { 8010 QL_DPRINT1(ha, "Can start at most %d VFs\n", 8011 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); 8012 goto qlnx_iov_init_err0; 8013 } 8014 8015 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, 8016 M_NOWAIT); 8017 8018 if (ha->vf_attr == NULL) 8019 goto qlnx_iov_init_err0; 8020 8021 memset(¶ms, 0, sizeof(params)); 8022 8023 /* Initialize HW for VF access */ 8024 for_each_hwfn(cdev, j) { 8025 struct ecore_hwfn *hwfn = &cdev->hwfns[j]; 8026 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8027 8028 /* Make sure not to use more than 16 queues per VF */ 8029 params.num_queues = min_t(int, 8030 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 8031 
16); 8032 8033 if (!ptt) { 8034 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8035 goto qlnx_iov_init_err1; 8036 } 8037 8038 for (i = 0; i < num_vfs; i++) { 8039 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) 8040 continue; 8041 8042 qlnx_sriov_enable_qid_config(hwfn, i, ¶ms); 8043 8044 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, ¶ms); 8045 8046 if (ret) { 8047 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); 8048 ecore_ptt_release(hwfn, ptt); 8049 goto qlnx_iov_init_err1; 8050 } 8051 } 8052 8053 ecore_ptt_release(hwfn, ptt); 8054 } 8055 8056 ha->num_vfs = num_vfs; 8057 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); 8058 8059 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs); 8060 8061 return (0); 8062 8063 qlnx_iov_init_err1: 8064 qlnx_sriov_disable(ha); 8065 8066 qlnx_iov_init_err0: 8067 qlnx_destroy_pf_taskqueues(ha); 8068 ha->num_vfs = 0; 8069 8070 return (-1); 8071 } 8072 8073 static void 8074 qlnx_iov_uninit(device_t dev) 8075 { 8076 qlnx_host_t *ha; 8077 8078 if ((ha = device_get_softc(dev)) == NULL) { 8079 device_printf(dev, "%s: cannot get softc\n", __func__); 8080 return; 8081 } 8082 8083 QL_DPRINT2(ha," dev = %p enter\n", dev); 8084 8085 qlnx_sriov_disable(ha); 8086 qlnx_destroy_pf_taskqueues(ha); 8087 8088 free(ha->vf_attr, M_QLNXBUF); 8089 ha->vf_attr = NULL; 8090 8091 ha->num_vfs = 0; 8092 8093 QL_DPRINT2(ha," dev = %p exit\n", dev); 8094 return; 8095 } 8096 8097 static int 8098 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 8099 { 8100 qlnx_host_t *ha; 8101 qlnx_vf_attr_t *vf_attr; 8102 unsigned const char *mac; 8103 size_t size; 8104 struct ecore_hwfn *p_hwfn; 8105 8106 if ((ha = device_get_softc(dev)) == NULL) { 8107 device_printf(dev, "%s: cannot get softc\n", __func__); 8108 return (-1); 8109 } 8110 8111 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8112 8113 if (vfnum > (ha->num_vfs - 1)) { 8114 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8115 vfnum, (ha->num_vfs - 1)); 8116 } 8117 8118 vf_attr = &ha->vf_attr[vfnum]; 8119 8120 if (nvlist_exists_binary(params, "mac-addr")) { 8121 mac = nvlist_get_binary(params, "mac-addr", &size); 8122 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8123 device_printf(dev, 8124 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8125 __func__, vf_attr->mac_addr[0], 8126 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8127 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8128 vf_attr->mac_addr[5]); 8129 p_hwfn = &ha->cdev.hwfns[0]; 8130 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8131 vfnum); 8132 } 8133 8134 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8135 return (0); 8136 } 8137 8138 static void 8139 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8140 { 8141 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8142 struct ecore_ptt *ptt; 8143 int i; 8144 8145 ptt = ecore_ptt_acquire(p_hwfn); 8146 if (!ptt) { 8147 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8148 __qlnx_pf_vf_msg(p_hwfn, 0); 8149 return; 8150 } 8151 8152 ecore_iov_pf_get_pending_events(p_hwfn, events); 8153 8154 QL_DPRINT2(ha, "Event mask of VF events:" 8155 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8156 events[0], events[1], events[2]); 8157 8158 ecore_for_each_vf(p_hwfn, i) { 8159 /* Skip VFs with no pending messages */ 8160 if (!(events[i / 64] & (1ULL << (i % 64)))) 8161 continue; 8162 8163 QL_DPRINT2(ha, 8164 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8165 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8166 8167 /* Copy VF's message to PF's request buffer for 
that VF */ 8168 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8169 continue; 8170 8171 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8172 } 8173 8174 ecore_ptt_release(p_hwfn, ptt); 8175 8176 return; 8177 } 8178 8179 static void 8180 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8181 { 8182 struct ecore_ptt *ptt; 8183 int ret; 8184 8185 ptt = ecore_ptt_acquire(p_hwfn); 8186 8187 if (!ptt) { 8188 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8189 __qlnx_vf_flr_update(p_hwfn); 8190 return; 8191 } 8192 8193 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8194 8195 if (ret) { 8196 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8197 } 8198 8199 ecore_ptt_release(p_hwfn, ptt); 8200 8201 return; 8202 } 8203 8204 static void 8205 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8206 { 8207 struct ecore_ptt *ptt; 8208 int i; 8209 8210 ptt = ecore_ptt_acquire(p_hwfn); 8211 8212 if (!ptt) { 8213 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8214 qlnx_vf_bulleting_update(p_hwfn); 8215 return; 8216 } 8217 8218 ecore_for_each_vf(p_hwfn, i) { 8219 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8220 p_hwfn, i); 8221 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8222 } 8223 8224 ecore_ptt_release(p_hwfn, ptt); 8225 8226 return; 8227 } 8228 8229 static void 8230 qlnx_pf_taskqueue(void *context, int pending) 8231 { 8232 struct ecore_hwfn *p_hwfn; 8233 qlnx_host_t *ha; 8234 int i; 8235 8236 p_hwfn = context; 8237 8238 if (p_hwfn == NULL) 8239 return; 8240 8241 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8242 8243 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8244 return; 8245 8246 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8247 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) 8248 qlnx_handle_vf_msg(ha, p_hwfn); 8249 8250 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8251 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) 8252 qlnx_handle_vf_flr_update(ha, p_hwfn); 8253 8254 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8255 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) 8256 qlnx_handle_bulletin_update(ha, p_hwfn); 8257 8258 return; 8259 } 8260 8261 static int 8262 qlnx_create_pf_taskqueues(qlnx_host_t *ha) 8263 { 8264 int i; 8265 uint8_t tq_name[32]; 8266 8267 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8268 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 8269 8270 bzero(tq_name, sizeof (tq_name)); 8271 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); 8272 8273 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); 8274 8275 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 8276 taskqueue_thread_enqueue, 8277 &ha->sriov_task[i].pf_taskqueue); 8278 8279 if (ha->sriov_task[i].pf_taskqueue == NULL) 8280 return (-1); 8281 8282 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, 8283 PI_NET, "%s", tq_name); 8284 8285 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); 8286 } 8287 8288 return (0); 8289 } 8290 8291 static void 8292 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) 8293 { 8294 int i; 8295 8296 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8297 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8298 taskqueue_drain(ha->sriov_task[i].pf_taskqueue, 8299 &ha->sriov_task[i].pf_task); 8300 taskqueue_free(ha->sriov_task[i].pf_taskqueue); 8301 ha->sriov_task[i].pf_taskqueue = NULL; 8302 } 8303 } 8304 return; 8305 } 8306 8307 static void 8308 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) 8309 { 8310 struct ecore_mcp_link_capabilities caps; 8311 struct ecore_mcp_link_params params; 8312 
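/*
 * Descriptive note: the PF snapshots its current MCP link parameters,
 * state and capabilities below and mirrors them into the bulletin board
 * of every possible VF.  Link is forced down whenever the PF link is
 * down; when it is up, CMT (dual-hwfn) devices advertise 100G while
 * single-engine devices report the speed obtained from the MFW.  The
 * bulletin update at the end makes the new state visible to the VFs.
 */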
struct ecore_mcp_link_state link; 8313 int i; 8314 8315 if (!p_hwfn->pf_iov_info) 8316 return; 8317 8318 memset(¶ms, 0, sizeof(struct ecore_mcp_link_params)); 8319 memset(&link, 0, sizeof(struct ecore_mcp_link_state)); 8320 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); 8321 8322 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 8323 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); 8324 memcpy(¶ms, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); 8325 8326 QL_DPRINT2(ha, "called\n"); 8327 8328 /* Update bulletin of all future possible VFs with link configuration */ 8329 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 8330 /* Modify link according to the VF's configured link state */ 8331 8332 link.link_up = false; 8333 8334 if (ha->link_up) { 8335 link.link_up = true; 8336 /* Set speed according to maximum supported by HW. 8337 * that is 40G for regular devices and 100G for CMT 8338 * mode devices. 8339 */ 8340 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 8341 100000 : link.speed; 8342 } 8343 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); 8344 ecore_iov_set_link(p_hwfn, i, ¶ms, &link, &caps); 8345 } 8346 8347 qlnx_vf_bulleting_update(p_hwfn); 8348 8349 return; 8350 } 8351 #endif /* #ifndef QLNX_VF */ 8352 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 8353
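/*
 * Illustrative only -- a minimal iovctl.conf(5) fragment matching the VF
 * schema registered in qlnx_initialize_sriov() above.  The PF device
 * name ("ql0") and the MAC address are placeholders, not values taken
 * from this driver:
 *
 *	PF {
 *		device : "ql0";
 *		num_vfs : 2;
 *	}
 *
 *	VF-0 {
 *		mac-addr : "00:0e:1e:00:00:01";
 *		allow-set-mac : true;
 *		num-queues : 2;
 *	}
 *
 * Applying such a file (typically "iovctl -C -f <file>") is what drives
 * qlnx_iov_init() and qlnx_iov_add_vf() above.
 */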