1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: qlnx_os.c 30 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 31 */ 32 33 #include <sys/cdefs.h> 34 #include "qlnx_os.h" 35 #include "bcm_osal.h" 36 #include "reg_addr.h" 37 #include "ecore_gtt_reg_addr.h" 38 #include "ecore.h" 39 #include "ecore_chain.h" 40 #include "ecore_status.h" 41 #include "ecore_hw.h" 42 #include "ecore_rt_defs.h" 43 #include "ecore_init_ops.h" 44 #include "ecore_int.h" 45 #include "ecore_cxt.h" 46 #include "ecore_spq.h" 47 #include "ecore_init_fw_funcs.h" 48 #include "ecore_sp_commands.h" 49 #include "ecore_dev_api.h" 50 #include "ecore_l2_api.h" 51 #include "ecore_mcp.h" 52 #include "ecore_hw_defs.h" 53 #include "mcp_public.h" 54 #include "ecore_iro.h" 55 #include "nvm_cfg.h" 56 #include "ecore_dbg_fw_funcs.h" 57 #include "ecore_iov_api.h" 58 #include "ecore_vf_api.h" 59 60 #include "qlnx_ioctl.h" 61 #include "qlnx_def.h" 62 #include "qlnx_ver.h" 63 64 #ifdef QLNX_ENABLE_IWARP 65 #include "qlnx_rdma.h" 66 #endif /* #ifdef QLNX_ENABLE_IWARP */ 67 68 #ifdef CONFIG_ECORE_SRIOV 69 #include <sys/nv.h> 70 #include <sys/iov_schema.h> 71 #include <dev/pci/pci_iov.h> 72 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 73 74 #include <sys/smp.h> 75 76 /* 77 * static functions 78 */ 79 /* 80 * ioctl related functions 81 */ 82 static void qlnx_add_sysctls(qlnx_host_t *ha); 83 84 /* 85 * main driver 86 */ 87 static void qlnx_release(qlnx_host_t *ha); 88 static void qlnx_fp_isr(void *arg); 89 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 90 static void qlnx_init(void *arg); 91 static void qlnx_init_locked(qlnx_host_t *ha); 92 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 93 static int qlnx_set_promisc(qlnx_host_t *ha); 94 static int qlnx_set_allmulti(qlnx_host_t *ha); 95 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data); 96 static int qlnx_media_change(if_t ifp); 97 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr); 98 static void qlnx_stop(qlnx_host_t *ha); 99 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 100 struct mbuf **m_headp); 101 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 102 
static uint32_t qlnx_get_optics(qlnx_host_t *ha, 103 struct qlnx_link_output *if_link); 104 static int qlnx_transmit(if_t ifp, struct mbuf *mp); 105 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, 106 struct mbuf *mp); 107 static void qlnx_qflush(if_t ifp); 108 109 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 110 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 111 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 112 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 113 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 114 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 115 116 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 117 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 118 119 static int qlnx_nic_setup(struct ecore_dev *cdev, 120 struct ecore_pf_params *func_params); 121 static int qlnx_nic_start(struct ecore_dev *cdev); 122 static int qlnx_slowpath_start(qlnx_host_t *ha); 123 static int qlnx_slowpath_stop(qlnx_host_t *ha); 124 static int qlnx_init_hw(qlnx_host_t *ha); 125 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 126 char ver_str[VER_SIZE]); 127 static void qlnx_unload(qlnx_host_t *ha); 128 static int qlnx_load(qlnx_host_t *ha); 129 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 130 uint32_t add_mac); 131 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 132 uint32_t len); 133 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 134 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 135 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 136 struct qlnx_rx_queue *rxq); 137 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 138 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 139 int hwfn_index); 140 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 141 int hwfn_index); 142 static void qlnx_timer(void *arg); 143 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 144 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 145 static void qlnx_trigger_dump(qlnx_host_t *ha); 146 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 147 struct qlnx_tx_queue *txq); 148 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 149 struct qlnx_tx_queue *txq); 150 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 151 int lro_enable); 152 static void qlnx_fp_taskqueue(void *context, int pending); 153 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 154 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 155 struct qlnx_agg_info *tpa); 156 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 157 158 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 159 160 /* 161 * Hooks to the Operating Systems 162 */ 163 static int qlnx_pci_probe (device_t); 164 static int qlnx_pci_attach (device_t); 165 static int qlnx_pci_detach (device_t); 166 167 #ifndef QLNX_VF 168 169 #ifdef CONFIG_ECORE_SRIOV 170 171 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 172 static void qlnx_iov_uninit(device_t dev); 173 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 174 static void qlnx_initialize_sriov(qlnx_host_t *ha); 175 static void qlnx_pf_taskqueue(void *context, int pending); 176 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha); 177 
static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha);
static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha);

#endif /* #ifdef CONFIG_ECORE_SRIOV */

static device_method_t qlnx_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),

#ifdef CONFIG_ECORE_SRIOV
	DEVMETHOD(pci_iov_init, qlnx_iov_init),
	DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf),
#endif /* #ifdef CONFIG_ECORE_SRIOV */
	{ 0, 0 }
};

static driver_t qlnx_pci_driver = {
	"ql", qlnx_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxe,1);
DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1);

#else

static device_method_t qlnxv_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, qlnx_pci_probe),
	DEVMETHOD(device_attach, qlnx_pci_attach),
	DEVMETHOD(device_detach, qlnx_pci_detach),
	{ 0, 0 }
};

static driver_t qlnxv_pci_driver = {
	"ql", qlnxv_pci_methods, sizeof (qlnx_host_t),
};

MODULE_VERSION(if_qlnxev,1);
DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0);

MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1);
MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);

#endif /* #ifdef QLNX_VF */

MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");

char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];

/*
 * Some PCI Configuration Space Related Defines
 */

#ifndef PCI_VENDOR_QLOGIC
#define PCI_VENDOR_QLOGIC		0x1077
#endif

/* 40G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1634
#define QLOGIC_PCI_DEVICE_ID_1634	0x1634
#endif

/* 100G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1644
#define QLOGIC_PCI_DEVICE_ID_1644	0x1644
#endif

/* 25G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1656
#define QLOGIC_PCI_DEVICE_ID_1656	0x1656
#endif

/* 50G Adapter QLE45xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_1654
#define QLOGIC_PCI_DEVICE_ID_1654	0x1654
#endif

/* 10G/25G/40G Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8070
#define QLOGIC_PCI_DEVICE_ID_8070	0x8070
#endif

/* SRIOV Device (All Speeds) Adapter QLE41xxx*/
#ifndef QLOGIC_PCI_DEVICE_ID_8090
#define QLOGIC_PCI_DEVICE_ID_8090	0x8090
#endif

SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "qlnxe driver parameters");

/* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */
static int qlnxe_queue_count = QLNX_DEFAULT_RSS;

SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN,
    &qlnxe_queue_count, 0, "Multi-Queue queue count");

/*
 * Note on RDMA personality setting
 *
 * Read the personality configured in NVRAM.
 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and
 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT,
 * use the personality in NVRAM.
 *
 * Otherwise use the personality configured in sysctl.
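 *
 * Illustrative example (added note, not from the original sources): each
 * PCI function owns a 4-bit nibble of the hw.qlnxe.rdma_configuration
 * tunable, decoded the same way qlnx_get_personality() does further below:
 *
 *     personality = (qlnxe_rdma_configuration >>
 *         (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & QLNX_PERSONALIY_MASK;
 *
 * With the default value 0x22222222, PCI functions 0-7 request the
 * ETH_IWARP override (0x2) while functions 8-15 keep the NVRAM personality
 * (0x0). A hypothetical setting of hw.qlnxe.rdma_configuration=0x22222231
 * would instead request ETH_ONLY (0x1) on function 0 and ETH_ROCE (0x3)
 * on function 1.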
290 * 291 */ 292 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */ 293 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */ 294 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */ 295 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */ 296 #define QLNX_PERSONALITY_BITS_PER_FUNC 4 297 #define QLNX_PERSONALIY_MASK 0xF 298 299 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/ 300 static uint64_t qlnxe_rdma_configuration = 0x22222222; 301 302 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 303 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 304 305 int 306 qlnx_vf_device(qlnx_host_t *ha) 307 { 308 uint16_t device_id; 309 310 device_id = ha->device_id; 311 312 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 313 return 0; 314 315 return -1; 316 } 317 318 static int 319 qlnx_valid_device(qlnx_host_t *ha) 320 { 321 uint16_t device_id; 322 323 device_id = ha->device_id; 324 325 #ifndef QLNX_VF 326 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 327 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 328 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 329 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 330 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 331 return 0; 332 #else 333 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 334 return 0; 335 336 #endif /* #ifndef QLNX_VF */ 337 return -1; 338 } 339 340 #ifdef QLNX_ENABLE_IWARP 341 static int 342 qlnx_rdma_supported(struct qlnx_host *ha) 343 { 344 uint16_t device_id; 345 346 device_id = pci_get_device(ha->pci_dev); 347 348 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 349 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 350 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 351 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 352 return (0); 353 354 return (-1); 355 } 356 #endif /* #ifdef QLNX_ENABLE_IWARP */ 357 358 /* 359 * Name: qlnx_pci_probe 360 * Function: Validate the PCI device to be a QLA80XX device 361 */ 362 static int 363 qlnx_pci_probe(device_t dev) 364 { 365 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 366 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 367 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 368 369 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 370 return (ENXIO); 371 } 372 373 switch (pci_get_device(dev)) { 374 #ifndef QLNX_VF 375 376 case QLOGIC_PCI_DEVICE_ID_1644: 377 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 378 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 379 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 380 QLNX_VERSION_BUILD); 381 device_set_desc_copy(dev, qlnx_dev_str); 382 383 break; 384 385 case QLOGIC_PCI_DEVICE_ID_1634: 386 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 387 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 388 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 389 QLNX_VERSION_BUILD); 390 device_set_desc_copy(dev, qlnx_dev_str); 391 392 break; 393 394 case QLOGIC_PCI_DEVICE_ID_1656: 395 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 396 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 397 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 398 QLNX_VERSION_BUILD); 399 device_set_desc_copy(dev, qlnx_dev_str); 400 401 break; 402 403 case QLOGIC_PCI_DEVICE_ID_1654: 404 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 405 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 406 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 407 QLNX_VERSION_BUILD); 408 device_set_desc_copy(dev, qlnx_dev_str); 409 410 break; 411 412 case QLOGIC_PCI_DEVICE_ID_8070: 413 
snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 414 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 415 " Adapter-Ethernet Function", 416 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 417 QLNX_VERSION_BUILD); 418 device_set_desc_copy(dev, qlnx_dev_str); 419 420 break; 421 422 #else 423 case QLOGIC_PCI_DEVICE_ID_8090: 424 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 425 "Qlogic SRIOV PCI CNA (AH) " 426 "Adapter-Ethernet Function", 427 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 428 QLNX_VERSION_BUILD); 429 device_set_desc_copy(dev, qlnx_dev_str); 430 431 break; 432 433 #endif /* #ifndef QLNX_VF */ 434 435 default: 436 return (ENXIO); 437 } 438 439 #ifdef QLNX_ENABLE_IWARP 440 qlnx_rdma_init(); 441 #endif /* #ifdef QLNX_ENABLE_IWARP */ 442 443 return (BUS_PROBE_DEFAULT); 444 } 445 446 static uint16_t 447 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 448 struct qlnx_tx_queue *txq) 449 { 450 u16 hw_bd_cons; 451 u16 ecore_cons_idx; 452 453 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 454 455 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 456 457 return (hw_bd_cons - ecore_cons_idx); 458 } 459 460 static void 461 qlnx_sp_intr(void *arg) 462 { 463 struct ecore_hwfn *p_hwfn; 464 qlnx_host_t *ha; 465 int i; 466 467 p_hwfn = arg; 468 469 if (p_hwfn == NULL) { 470 printf("%s: spurious slowpath intr\n", __func__); 471 return; 472 } 473 474 ha = (qlnx_host_t *)p_hwfn->p_dev; 475 476 QL_DPRINT2(ha, "enter\n"); 477 478 for (i = 0; i < ha->cdev.num_hwfns; i++) { 479 if (&ha->cdev.hwfns[i] == p_hwfn) { 480 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 481 break; 482 } 483 } 484 QL_DPRINT2(ha, "exit\n"); 485 486 return; 487 } 488 489 static void 490 qlnx_sp_taskqueue(void *context, int pending) 491 { 492 struct ecore_hwfn *p_hwfn; 493 494 p_hwfn = context; 495 496 if (p_hwfn != NULL) { 497 qlnx_sp_isr(p_hwfn); 498 } 499 return; 500 } 501 502 static int 503 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 504 { 505 int i; 506 uint8_t tq_name[32]; 507 508 for (i = 0; i < ha->cdev.num_hwfns; i++) { 509 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 510 511 bzero(tq_name, sizeof (tq_name)); 512 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 513 514 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 515 516 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 517 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 518 519 if (ha->sp_taskqueue[i] == NULL) 520 return (-1); 521 522 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 523 tq_name); 524 525 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 526 } 527 528 return (0); 529 } 530 531 static void 532 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 533 { 534 int i; 535 536 for (i = 0; i < ha->cdev.num_hwfns; i++) { 537 if (ha->sp_taskqueue[i] != NULL) { 538 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 539 taskqueue_free(ha->sp_taskqueue[i]); 540 } 541 } 542 return; 543 } 544 545 static void 546 qlnx_fp_taskqueue(void *context, int pending) 547 { 548 struct qlnx_fastpath *fp; 549 qlnx_host_t *ha; 550 if_t ifp; 551 552 fp = context; 553 554 if (fp == NULL) 555 return; 556 557 ha = (qlnx_host_t *)fp->edev; 558 559 ifp = ha->ifp; 560 561 if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 562 if (!drbr_empty(ifp, fp->tx_br)) { 563 if(mtx_trylock(&fp->tx_mtx)) { 564 #ifdef QLNX_TRACE_PERF_DATA 565 tx_pkts = fp->tx_pkts_transmitted; 566 tx_compl = fp->tx_pkts_completed; 567 #endif 568 569 qlnx_transmit_locked(ifp, fp, NULL); 570 571 #ifdef QLNX_TRACE_PERF_DATA 572 fp->tx_pkts_trans_fp += 573 
(fp->tx_pkts_transmitted - tx_pkts); 574 fp->tx_pkts_compl_fp += 575 (fp->tx_pkts_completed - tx_compl); 576 #endif 577 mtx_unlock(&fp->tx_mtx); 578 } 579 } 580 } 581 582 QL_DPRINT2(ha, "exit \n"); 583 return; 584 } 585 586 static int 587 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 588 { 589 int i; 590 uint8_t tq_name[32]; 591 struct qlnx_fastpath *fp; 592 593 for (i = 0; i < ha->num_rss; i++) { 594 fp = &ha->fp_array[i]; 595 596 bzero(tq_name, sizeof (tq_name)); 597 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 598 599 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 600 601 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 602 taskqueue_thread_enqueue, 603 &fp->fp_taskqueue); 604 605 if (fp->fp_taskqueue == NULL) 606 return (-1); 607 608 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 609 tq_name); 610 611 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 612 } 613 614 return (0); 615 } 616 617 static void 618 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 619 { 620 int i; 621 struct qlnx_fastpath *fp; 622 623 for (i = 0; i < ha->num_rss; i++) { 624 fp = &ha->fp_array[i]; 625 626 if (fp->fp_taskqueue != NULL) { 627 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 628 taskqueue_free(fp->fp_taskqueue); 629 fp->fp_taskqueue = NULL; 630 } 631 } 632 return; 633 } 634 635 static void 636 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 637 { 638 int i; 639 struct qlnx_fastpath *fp; 640 641 for (i = 0; i < ha->num_rss; i++) { 642 fp = &ha->fp_array[i]; 643 644 if (fp->fp_taskqueue != NULL) { 645 QLNX_UNLOCK(ha); 646 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 647 QLNX_LOCK(ha); 648 } 649 } 650 return; 651 } 652 653 static void 654 qlnx_get_params(qlnx_host_t *ha) 655 { 656 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 657 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 658 qlnxe_queue_count); 659 qlnxe_queue_count = 0; 660 } 661 return; 662 } 663 664 static void 665 qlnx_error_recovery_taskqueue(void *context, int pending) 666 { 667 qlnx_host_t *ha; 668 669 ha = context; 670 671 QL_DPRINT2(ha, "enter\n"); 672 673 QLNX_LOCK(ha); 674 qlnx_stop(ha); 675 QLNX_UNLOCK(ha); 676 677 #ifdef QLNX_ENABLE_IWARP 678 qlnx_rdma_dev_remove(ha); 679 #endif /* #ifdef QLNX_ENABLE_IWARP */ 680 681 qlnx_slowpath_stop(ha); 682 qlnx_slowpath_start(ha); 683 684 #ifdef QLNX_ENABLE_IWARP 685 qlnx_rdma_dev_add(ha); 686 #endif /* #ifdef QLNX_ENABLE_IWARP */ 687 688 qlnx_init(ha); 689 690 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 691 692 QL_DPRINT2(ha, "exit\n"); 693 694 return; 695 } 696 697 static int 698 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 699 { 700 uint8_t tq_name[32]; 701 702 bzero(tq_name, sizeof (tq_name)); 703 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 704 705 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 706 707 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 708 taskqueue_thread_enqueue, &ha->err_taskqueue); 709 710 if (ha->err_taskqueue == NULL) 711 return (-1); 712 713 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 714 715 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 716 717 return (0); 718 } 719 720 static void 721 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 722 { 723 if (ha->err_taskqueue != NULL) { 724 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 725 taskqueue_free(ha->err_taskqueue); 726 } 727 728 ha->err_taskqueue = NULL; 729 730 return; 731 } 732 733 /* 734 * Name: qlnx_pci_attach 735 * Function: attaches the device to the operating system 736 */ 737 
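
/*
 * Attach flow at a glance (summary comment added for readability; it only
 * restates what the code below does):
 *   1. Map BAR0 (device registers), BAR2 (doorbells) and BAR4 (MSI-X table).
 *   2. Allocate the parent/tx/rx DMA tags and prepare the ecore device
 *      (qlnx_init_hw() -> ecore_hw_prepare()).
 *   3. Size the RSS queue set, allocate MSI-X vectors and wire up the
 *      slowpath and fastpath interrupts and taskqueues.
 *   4. On a PF, allocate the grcdump/idle-check buffers and the error
 *      recovery taskqueue; then start the slowpath, query firmware
 *      versions, create the ifnet, and register sysctls, the ioctl cdev
 *      and the optional RDMA/SRIOV hooks.
 */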
static int 738 qlnx_pci_attach(device_t dev) 739 { 740 qlnx_host_t *ha = NULL; 741 uint32_t rsrc_len_reg __unused = 0; 742 uint32_t rsrc_len_dbells = 0; 743 uint32_t rsrc_len_msix __unused = 0; 744 int i; 745 uint32_t mfw_ver; 746 uint32_t num_sp_msix = 0; 747 uint32_t num_rdma_irqs = 0; 748 749 if ((ha = device_get_softc(dev)) == NULL) { 750 device_printf(dev, "cannot get softc\n"); 751 return (ENOMEM); 752 } 753 754 memset(ha, 0, sizeof (qlnx_host_t)); 755 756 ha->device_id = pci_get_device(dev); 757 758 if (qlnx_valid_device(ha) != 0) { 759 device_printf(dev, "device is not valid device\n"); 760 return (ENXIO); 761 } 762 ha->pci_func = pci_get_function(dev); 763 764 ha->pci_dev = dev; 765 766 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 767 768 ha->flags.lock_init = 1; 769 770 pci_enable_busmaster(dev); 771 772 /* 773 * map the PCI BARs 774 */ 775 776 ha->reg_rid = PCIR_BAR(0); 777 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 778 RF_ACTIVE); 779 780 if (ha->pci_reg == NULL) { 781 device_printf(dev, "unable to map BAR0\n"); 782 goto qlnx_pci_attach_err; 783 } 784 785 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 786 ha->reg_rid); 787 788 ha->dbells_rid = PCIR_BAR(2); 789 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, 790 SYS_RES_MEMORY, 791 ha->dbells_rid); 792 if (rsrc_len_dbells) { 793 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 794 &ha->dbells_rid, RF_ACTIVE); 795 796 if (ha->pci_dbells == NULL) { 797 device_printf(dev, "unable to map BAR1\n"); 798 goto qlnx_pci_attach_err; 799 } 800 ha->dbells_phys_addr = (uint64_t) 801 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 802 803 ha->dbells_size = rsrc_len_dbells; 804 } else { 805 if (qlnx_vf_device(ha) != 0) { 806 device_printf(dev, " BAR1 size is zero\n"); 807 goto qlnx_pci_attach_err; 808 } 809 } 810 811 ha->msix_rid = PCIR_BAR(4); 812 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 813 &ha->msix_rid, RF_ACTIVE); 814 815 if (ha->msix_bar == NULL) { 816 device_printf(dev, "unable to map BAR2\n"); 817 goto qlnx_pci_attach_err; 818 } 819 820 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 821 ha->msix_rid); 822 823 ha->dbg_level = 0x0000; 824 825 QL_DPRINT1(ha, "\n\t\t\t" 826 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 827 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 828 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 829 " msix_avail = 0x%x " 830 "\n\t\t\t[ncpus = %d]\n", 831 ha->pci_dev, ha->pci_reg, rsrc_len_reg, 832 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 833 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 834 mp_ncpus); 835 /* 836 * allocate dma tags 837 */ 838 839 if (qlnx_alloc_parent_dma_tag(ha)) 840 goto qlnx_pci_attach_err; 841 842 if (qlnx_alloc_tx_dma_tag(ha)) 843 goto qlnx_pci_attach_err; 844 845 if (qlnx_alloc_rx_dma_tag(ha)) 846 goto qlnx_pci_attach_err; 847 848 849 if (qlnx_init_hw(ha) != 0) 850 goto qlnx_pci_attach_err; 851 852 ha->flags.hw_init = 1; 853 854 qlnx_get_params(ha); 855 856 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && 857 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { 858 qlnxe_queue_count = QLNX_MAX_RSS; 859 } 860 861 /* 862 * Allocate MSI-x vectors 863 */ 864 if (qlnx_vf_device(ha) != 0) { 865 if (qlnxe_queue_count == 0) 866 ha->num_rss = QLNX_DEFAULT_RSS; 867 else 868 ha->num_rss = qlnxe_queue_count; 869 870 num_sp_msix = ha->cdev.num_hwfns; 871 } else { 872 uint8_t 
max_rxq; 873 uint8_t max_txq; 874 875 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 876 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 877 878 if (max_rxq < max_txq) 879 ha->num_rss = max_rxq; 880 else 881 ha->num_rss = max_txq; 882 883 if (ha->num_rss > QLNX_MAX_VF_RSS) 884 ha->num_rss = QLNX_MAX_VF_RSS; 885 886 num_sp_msix = 0; 887 } 888 889 if (ha->num_rss > mp_ncpus) 890 ha->num_rss = mp_ncpus; 891 892 ha->num_tc = QLNX_MAX_TC; 893 894 ha->msix_count = pci_msix_count(dev); 895 896 #ifdef QLNX_ENABLE_IWARP 897 898 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 899 900 #endif /* #ifdef QLNX_ENABLE_IWARP */ 901 902 if (!ha->msix_count || 903 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 904 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 905 ha->msix_count); 906 goto qlnx_pci_attach_err; 907 } 908 909 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 910 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 911 else 912 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 913 914 QL_DPRINT1(ha, "\n\t\t\t" 915 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 916 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 917 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 918 " msix_avail = 0x%x msix_alloc = 0x%x" 919 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 920 ha->pci_reg, rsrc_len_reg, 921 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 922 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 923 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 924 925 if (pci_alloc_msix(dev, &ha->msix_count)) { 926 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 927 ha->msix_count); 928 ha->msix_count = 0; 929 goto qlnx_pci_attach_err; 930 } 931 932 /* 933 * Initialize slow path interrupt and task queue 934 */ 935 936 if (num_sp_msix) { 937 if (qlnx_create_sp_taskqueues(ha) != 0) 938 goto qlnx_pci_attach_err; 939 940 for (i = 0; i < ha->cdev.num_hwfns; i++) { 941 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 942 943 ha->sp_irq_rid[i] = i + 1; 944 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 945 &ha->sp_irq_rid[i], 946 (RF_ACTIVE | RF_SHAREABLE)); 947 if (ha->sp_irq[i] == NULL) { 948 device_printf(dev, 949 "could not allocate mbx interrupt\n"); 950 goto qlnx_pci_attach_err; 951 } 952 953 if (bus_setup_intr(dev, ha->sp_irq[i], 954 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 955 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 956 device_printf(dev, 957 "could not setup slow path interrupt\n"); 958 goto qlnx_pci_attach_err; 959 } 960 961 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 962 " sp_irq %p sp_handle %p\n", p_hwfn, 963 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 964 } 965 } 966 967 /* 968 * initialize fast path interrupt 969 */ 970 if (qlnx_create_fp_taskqueues(ha) != 0) 971 goto qlnx_pci_attach_err; 972 973 for (i = 0; i < ha->num_rss; i++) { 974 ha->irq_vec[i].rss_idx = i; 975 ha->irq_vec[i].ha = ha; 976 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; 977 978 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 979 &ha->irq_vec[i].irq_rid, 980 (RF_ACTIVE | RF_SHAREABLE)); 981 982 if (ha->irq_vec[i].irq == NULL) { 983 device_printf(dev, 984 "could not allocate interrupt[%d] irq_rid = %d\n", 985 i, ha->irq_vec[i].irq_rid); 986 goto qlnx_pci_attach_err; 987 } 988 989 if (qlnx_alloc_tx_br(ha, &ha->fp_array[i])) { 990 device_printf(dev, "could not allocate tx_br[%d]\n", i); 991 goto qlnx_pci_attach_err; 992 } 993 } 994 995 if (qlnx_vf_device(ha) 
!= 0) { 996 callout_init(&ha->qlnx_callout, 1); 997 ha->flags.callout_init = 1; 998 999 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1000 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1001 goto qlnx_pci_attach_err; 1002 if (ha->grcdump_size[i] == 0) 1003 goto qlnx_pci_attach_err; 1004 1005 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1006 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1007 i, ha->grcdump_size[i]); 1008 1009 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1010 if (ha->grcdump[i] == NULL) { 1011 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1012 goto qlnx_pci_attach_err; 1013 } 1014 1015 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1016 goto qlnx_pci_attach_err; 1017 if (ha->idle_chk_size[i] == 0) 1018 goto qlnx_pci_attach_err; 1019 1020 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1021 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1022 i, ha->idle_chk_size[i]); 1023 1024 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1025 1026 if (ha->idle_chk[i] == NULL) { 1027 device_printf(dev, "idle_chk alloc failed\n"); 1028 goto qlnx_pci_attach_err; 1029 } 1030 } 1031 1032 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1033 goto qlnx_pci_attach_err; 1034 } 1035 1036 if (qlnx_slowpath_start(ha) != 0) 1037 goto qlnx_pci_attach_err; 1038 else 1039 ha->flags.slowpath_start = 1; 1040 1041 if (qlnx_vf_device(ha) != 0) { 1042 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1043 qlnx_mdelay(__func__, 1000); 1044 qlnx_trigger_dump(ha); 1045 1046 goto qlnx_pci_attach_err0; 1047 } 1048 1049 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1050 qlnx_mdelay(__func__, 1000); 1051 qlnx_trigger_dump(ha); 1052 1053 goto qlnx_pci_attach_err0; 1054 } 1055 } else { 1056 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1057 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1058 } 1059 1060 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1061 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1062 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1063 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1064 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1065 FW_ENGINEERING_VERSION); 1066 1067 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1068 ha->stormfw_ver, ha->mfw_ver); 1069 1070 qlnx_init_ifnet(dev, ha); 1071 1072 /* 1073 * add sysctls 1074 */ 1075 qlnx_add_sysctls(ha); 1076 1077 qlnx_pci_attach_err0: 1078 /* 1079 * create ioctl device interface 1080 */ 1081 if (qlnx_vf_device(ha) != 0) { 1082 if (qlnx_make_cdev(ha)) { 1083 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1084 goto qlnx_pci_attach_err; 1085 } 1086 1087 #ifdef QLNX_ENABLE_IWARP 1088 qlnx_rdma_dev_add(ha); 1089 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1090 } 1091 1092 #ifndef QLNX_VF 1093 #ifdef CONFIG_ECORE_SRIOV 1094 1095 if (qlnx_vf_device(ha) != 0) 1096 qlnx_initialize_sriov(ha); 1097 1098 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1099 #endif /* #ifdef QLNX_VF */ 1100 1101 QL_DPRINT2(ha, "success\n"); 1102 1103 return (0); 1104 1105 qlnx_pci_attach_err: 1106 1107 qlnx_release(ha); 1108 1109 return (ENXIO); 1110 } 1111 1112 /* 1113 * Name: qlnx_pci_detach 1114 * Function: Unhooks the device from the operating system 1115 */ 1116 static int 1117 qlnx_pci_detach(device_t dev) 1118 { 1119 qlnx_host_t *ha = NULL; 1120 1121 if ((ha = device_get_softc(dev)) == NULL) { 1122 device_printf(dev, "%s: cannot get softc\n", __func__); 1123 return (ENOMEM); 1124 } 1125 1126 if (qlnx_vf_device(ha) != 0) { 1127 #ifdef CONFIG_ECORE_SRIOV 1128 int ret; 
1129 1130 ret = pci_iov_detach(dev); 1131 if (ret) { 1132 device_printf(dev, "%s: SRIOV in use\n", __func__); 1133 return (ret); 1134 } 1135 1136 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1137 1138 #ifdef QLNX_ENABLE_IWARP 1139 if (qlnx_rdma_dev_remove(ha) != 0) 1140 return (EBUSY); 1141 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1142 } 1143 1144 QLNX_LOCK(ha); 1145 qlnx_stop(ha); 1146 QLNX_UNLOCK(ha); 1147 1148 qlnx_release(ha); 1149 1150 return (0); 1151 } 1152 1153 #ifdef QLNX_ENABLE_IWARP 1154 1155 static uint8_t 1156 qlnx_get_personality(uint8_t pci_func) 1157 { 1158 uint8_t personality; 1159 1160 personality = (qlnxe_rdma_configuration >> 1161 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & 1162 QLNX_PERSONALIY_MASK; 1163 return (personality); 1164 } 1165 1166 static void 1167 qlnx_set_personality(qlnx_host_t *ha) 1168 { 1169 uint8_t personality; 1170 1171 personality = qlnx_get_personality(ha->pci_func); 1172 1173 switch (personality) { 1174 case QLNX_PERSONALITY_DEFAULT: 1175 device_printf(ha->pci_dev, "%s: DEFAULT\n", 1176 __func__); 1177 ha->personality = ECORE_PCI_DEFAULT; 1178 break; 1179 1180 case QLNX_PERSONALITY_ETH_ONLY: 1181 device_printf(ha->pci_dev, "%s: ETH_ONLY\n", 1182 __func__); 1183 ha->personality = ECORE_PCI_ETH; 1184 break; 1185 1186 case QLNX_PERSONALITY_ETH_IWARP: 1187 device_printf(ha->pci_dev, "%s: ETH_IWARP\n", 1188 __func__); 1189 ha->personality = ECORE_PCI_ETH_IWARP; 1190 break; 1191 1192 case QLNX_PERSONALITY_ETH_ROCE: 1193 device_printf(ha->pci_dev, "%s: ETH_ROCE\n", 1194 __func__); 1195 ha->personality = ECORE_PCI_ETH_ROCE; 1196 break; 1197 } 1198 1199 return; 1200 } 1201 1202 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1203 1204 static int 1205 qlnx_init_hw(qlnx_host_t *ha) 1206 { 1207 int rval = 0; 1208 struct ecore_hw_prepare_params params; 1209 1210 ecore_init_struct(&ha->cdev); 1211 1212 /* ha->dp_module = ECORE_MSG_PROBE | 1213 ECORE_MSG_INTR | 1214 ECORE_MSG_SP | 1215 ECORE_MSG_LINK | 1216 ECORE_MSG_SPQ | 1217 ECORE_MSG_RDMA; 1218 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 1219 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; 1220 ha->dp_level = ECORE_LEVEL_NOTICE; 1221 //ha->dp_level = ECORE_LEVEL_VERBOSE; 1222 1223 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 1224 1225 ha->cdev.regview = ha->pci_reg; 1226 1227 ha->personality = ECORE_PCI_DEFAULT; 1228 1229 if (qlnx_vf_device(ha) == 0) { 1230 ha->cdev.b_is_vf = true; 1231 1232 if (ha->pci_dbells != NULL) { 1233 ha->cdev.doorbells = ha->pci_dbells; 1234 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1235 ha->cdev.db_size = ha->dbells_size; 1236 } else { 1237 ha->pci_dbells = ha->pci_reg; 1238 } 1239 } else { 1240 ha->cdev.doorbells = ha->pci_dbells; 1241 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1242 ha->cdev.db_size = ha->dbells_size; 1243 1244 #ifdef QLNX_ENABLE_IWARP 1245 1246 if (qlnx_rdma_supported(ha) == 0) 1247 qlnx_set_personality(ha); 1248 1249 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1250 } 1251 QL_DPRINT2(ha, "%s: %s\n", __func__, 1252 (ha->personality == ECORE_PCI_ETH_IWARP ? 
"iwarp": "ethernet")); 1253 1254 bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); 1255 1256 params.personality = ha->personality; 1257 1258 params.drv_resc_alloc = false; 1259 params.chk_reg_fifo = false; 1260 params.initiate_pf_flr = true; 1261 params.epoch = 0; 1262 1263 ecore_hw_prepare(&ha->cdev, ¶ms); 1264 1265 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 1266 1267 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n", 1268 ha, &ha->cdev, &ha->cdev.hwfns[0]); 1269 1270 return (rval); 1271 } 1272 1273 static void 1274 qlnx_release(qlnx_host_t *ha) 1275 { 1276 device_t dev; 1277 int i; 1278 1279 dev = ha->pci_dev; 1280 1281 QL_DPRINT2(ha, "enter\n"); 1282 1283 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 1284 if (ha->idle_chk[i] != NULL) { 1285 free(ha->idle_chk[i], M_QLNXBUF); 1286 ha->idle_chk[i] = NULL; 1287 } 1288 1289 if (ha->grcdump[i] != NULL) { 1290 free(ha->grcdump[i], M_QLNXBUF); 1291 ha->grcdump[i] = NULL; 1292 } 1293 } 1294 1295 if (ha->flags.callout_init) 1296 callout_drain(&ha->qlnx_callout); 1297 1298 if (ha->flags.slowpath_start) { 1299 qlnx_slowpath_stop(ha); 1300 } 1301 1302 if (ha->flags.hw_init) 1303 ecore_hw_remove(&ha->cdev); 1304 1305 qlnx_del_cdev(ha); 1306 1307 if (ha->ifp != NULL) 1308 ether_ifdetach(ha->ifp); 1309 1310 qlnx_free_tx_dma_tag(ha); 1311 1312 qlnx_free_rx_dma_tag(ha); 1313 1314 qlnx_free_parent_dma_tag(ha); 1315 1316 if (qlnx_vf_device(ha) != 0) { 1317 qlnx_destroy_error_recovery_taskqueue(ha); 1318 } 1319 1320 for (i = 0; i < ha->num_rss; i++) { 1321 struct qlnx_fastpath *fp = &ha->fp_array[i]; 1322 1323 if (ha->irq_vec[i].handle) { 1324 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 1325 ha->irq_vec[i].handle); 1326 } 1327 1328 if (ha->irq_vec[i].irq) { 1329 (void)bus_release_resource(dev, SYS_RES_IRQ, 1330 ha->irq_vec[i].irq_rid, 1331 ha->irq_vec[i].irq); 1332 } 1333 1334 qlnx_free_tx_br(ha, fp); 1335 } 1336 qlnx_destroy_fp_taskqueues(ha); 1337 1338 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1339 if (ha->sp_handle[i]) 1340 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1341 ha->sp_handle[i]); 1342 1343 if (ha->sp_irq[i]) 1344 (void) bus_release_resource(dev, SYS_RES_IRQ, 1345 ha->sp_irq_rid[i], ha->sp_irq[i]); 1346 } 1347 1348 qlnx_destroy_sp_taskqueues(ha); 1349 1350 if (ha->msix_count) 1351 pci_release_msi(dev); 1352 1353 if (ha->flags.lock_init) { 1354 mtx_destroy(&ha->hw_lock); 1355 } 1356 1357 if (ha->pci_reg) 1358 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1359 ha->pci_reg); 1360 1361 if (ha->dbells_size && ha->pci_dbells) 1362 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1363 ha->pci_dbells); 1364 1365 if (ha->msix_bar) 1366 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1367 ha->msix_bar); 1368 1369 QL_DPRINT2(ha, "exit\n"); 1370 return; 1371 } 1372 1373 static void 1374 qlnx_trigger_dump(qlnx_host_t *ha) 1375 { 1376 int i; 1377 1378 if (ha->ifp != NULL) 1379 if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 1380 1381 QL_DPRINT2(ha, "enter\n"); 1382 1383 if (qlnx_vf_device(ha) == 0) 1384 return; 1385 1386 ha->error_recovery = 1; 1387 1388 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1389 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1390 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1391 } 1392 1393 QL_DPRINT2(ha, "exit\n"); 1394 1395 return; 1396 } 1397 1398 static int 1399 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1400 { 1401 int err, ret = 0; 1402 qlnx_host_t *ha; 1403 1404 err = sysctl_handle_int(oidp, &ret, 0, req); 1405 1406 if (err || !req->newptr) 1407 
return (err); 1408 1409 if (ret == 1) { 1410 ha = (qlnx_host_t *)arg1; 1411 qlnx_trigger_dump(ha); 1412 } 1413 return (err); 1414 } 1415 1416 static int 1417 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1418 { 1419 int err, i, ret = 0, usecs = 0; 1420 qlnx_host_t *ha; 1421 struct ecore_hwfn *p_hwfn; 1422 struct qlnx_fastpath *fp; 1423 1424 err = sysctl_handle_int(oidp, &usecs, 0, req); 1425 1426 if (err || !req->newptr || !usecs || (usecs > 255)) 1427 return (err); 1428 1429 ha = (qlnx_host_t *)arg1; 1430 1431 if (qlnx_vf_device(ha) == 0) 1432 return (-1); 1433 1434 for (i = 0; i < ha->num_rss; i++) { 1435 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1436 1437 fp = &ha->fp_array[i]; 1438 1439 if (fp->txq[0]->handle != NULL) { 1440 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1441 (uint16_t)usecs, fp->txq[0]->handle); 1442 } 1443 } 1444 1445 if (!ret) 1446 ha->tx_coalesce_usecs = (uint8_t)usecs; 1447 1448 return (err); 1449 } 1450 1451 static int 1452 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1453 { 1454 int err, i, ret = 0, usecs = 0; 1455 qlnx_host_t *ha; 1456 struct ecore_hwfn *p_hwfn; 1457 struct qlnx_fastpath *fp; 1458 1459 err = sysctl_handle_int(oidp, &usecs, 0, req); 1460 1461 if (err || !req->newptr || !usecs || (usecs > 255)) 1462 return (err); 1463 1464 ha = (qlnx_host_t *)arg1; 1465 1466 if (qlnx_vf_device(ha) == 0) 1467 return (-1); 1468 1469 for (i = 0; i < ha->num_rss; i++) { 1470 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1471 1472 fp = &ha->fp_array[i]; 1473 1474 if (fp->rxq->handle != NULL) { 1475 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1476 0, fp->rxq->handle); 1477 } 1478 } 1479 1480 if (!ret) 1481 ha->rx_coalesce_usecs = (uint8_t)usecs; 1482 1483 return (err); 1484 } 1485 1486 static void 1487 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1488 { 1489 struct sysctl_ctx_list *ctx; 1490 struct sysctl_oid_list *children; 1491 struct sysctl_oid *ctx_oid; 1492 1493 ctx = device_get_sysctl_ctx(ha->pci_dev); 1494 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1495 1496 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1497 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); 1498 children = SYSCTL_CHILDREN(ctx_oid); 1499 1500 SYSCTL_ADD_QUAD(ctx, children, 1501 OID_AUTO, "sp_interrupts", 1502 CTLFLAG_RD, &ha->sp_interrupts, 1503 "No. of slowpath interrupts"); 1504 1505 return; 1506 } 1507 1508 static void 1509 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1510 { 1511 struct sysctl_ctx_list *ctx; 1512 struct sysctl_oid_list *children; 1513 struct sysctl_oid_list *node_children; 1514 struct sysctl_oid *ctx_oid; 1515 int i, j; 1516 uint8_t name_str[16]; 1517 1518 ctx = device_get_sysctl_ctx(ha->pci_dev); 1519 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1520 1521 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1522 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); 1523 children = SYSCTL_CHILDREN(ctx_oid); 1524 1525 for (i = 0; i < ha->num_rss; i++) { 1526 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1527 snprintf(name_str, sizeof(name_str), "%d", i); 1528 1529 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1530 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); 1531 node_children = SYSCTL_CHILDREN(ctx_oid); 1532 1533 /* Tx Related */ 1534 1535 SYSCTL_ADD_QUAD(ctx, node_children, 1536 OID_AUTO, "tx_pkts_processed", 1537 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1538 "No. 
of packets processed for transmission"); 1539 1540 SYSCTL_ADD_QUAD(ctx, node_children, 1541 OID_AUTO, "tx_pkts_freed", 1542 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1543 "No. of freed packets"); 1544 1545 SYSCTL_ADD_QUAD(ctx, node_children, 1546 OID_AUTO, "tx_pkts_transmitted", 1547 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1548 "No. of transmitted packets"); 1549 1550 SYSCTL_ADD_QUAD(ctx, node_children, 1551 OID_AUTO, "tx_pkts_completed", 1552 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1553 "No. of transmit completions"); 1554 1555 SYSCTL_ADD_QUAD(ctx, node_children, 1556 OID_AUTO, "tx_non_tso_pkts", 1557 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1558 "No. of non LSO transmited packets"); 1559 1560 #ifdef QLNX_TRACE_PERF_DATA 1561 1562 SYSCTL_ADD_QUAD(ctx, node_children, 1563 OID_AUTO, "tx_pkts_trans_ctx", 1564 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1565 "No. of transmitted packets in transmit context"); 1566 1567 SYSCTL_ADD_QUAD(ctx, node_children, 1568 OID_AUTO, "tx_pkts_compl_ctx", 1569 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1570 "No. of transmit completions in transmit context"); 1571 1572 SYSCTL_ADD_QUAD(ctx, node_children, 1573 OID_AUTO, "tx_pkts_trans_fp", 1574 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1575 "No. of transmitted packets in taskqueue"); 1576 1577 SYSCTL_ADD_QUAD(ctx, node_children, 1578 OID_AUTO, "tx_pkts_compl_fp", 1579 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1580 "No. of transmit completions in taskqueue"); 1581 1582 SYSCTL_ADD_QUAD(ctx, node_children, 1583 OID_AUTO, "tx_pkts_compl_intr", 1584 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1585 "No. of transmit completions in interrupt ctx"); 1586 #endif 1587 1588 SYSCTL_ADD_QUAD(ctx, node_children, 1589 OID_AUTO, "tx_tso_pkts", 1590 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1591 "No. 
of LSO transmited packets"); 1592 1593 SYSCTL_ADD_QUAD(ctx, node_children, 1594 OID_AUTO, "tx_lso_wnd_min_len", 1595 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1596 "tx_lso_wnd_min_len"); 1597 1598 SYSCTL_ADD_QUAD(ctx, node_children, 1599 OID_AUTO, "tx_defrag", 1600 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1601 "tx_defrag"); 1602 1603 SYSCTL_ADD_QUAD(ctx, node_children, 1604 OID_AUTO, "tx_nsegs_gt_elem_left", 1605 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1606 "tx_nsegs_gt_elem_left"); 1607 1608 SYSCTL_ADD_UINT(ctx, node_children, 1609 OID_AUTO, "tx_tso_max_nsegs", 1610 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1611 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1612 1613 SYSCTL_ADD_UINT(ctx, node_children, 1614 OID_AUTO, "tx_tso_min_nsegs", 1615 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1616 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1617 1618 SYSCTL_ADD_UINT(ctx, node_children, 1619 OID_AUTO, "tx_tso_max_pkt_len", 1620 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1621 ha->fp_array[i].tx_tso_max_pkt_len, 1622 "tx_tso_max_pkt_len"); 1623 1624 SYSCTL_ADD_UINT(ctx, node_children, 1625 OID_AUTO, "tx_tso_min_pkt_len", 1626 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1627 ha->fp_array[i].tx_tso_min_pkt_len, 1628 "tx_tso_min_pkt_len"); 1629 1630 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1631 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1632 snprintf(name_str, sizeof(name_str), 1633 "tx_pkts_nseg_%02d", (j+1)); 1634 1635 SYSCTL_ADD_QUAD(ctx, node_children, 1636 OID_AUTO, name_str, CTLFLAG_RD, 1637 &ha->fp_array[i].tx_pkts[j], name_str); 1638 } 1639 1640 #ifdef QLNX_TRACE_PERF_DATA 1641 for (j = 0; j < 18; j++) { 1642 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1643 snprintf(name_str, sizeof(name_str), 1644 "tx_pkts_hist_%02d", (j+1)); 1645 1646 SYSCTL_ADD_QUAD(ctx, node_children, 1647 OID_AUTO, name_str, CTLFLAG_RD, 1648 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1649 } 1650 for (j = 0; j < 5; j++) { 1651 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1652 snprintf(name_str, sizeof(name_str), 1653 "tx_comInt_%02d", (j+1)); 1654 1655 SYSCTL_ADD_QUAD(ctx, node_children, 1656 OID_AUTO, name_str, CTLFLAG_RD, 1657 &ha->fp_array[i].tx_comInt[j], name_str); 1658 } 1659 for (j = 0; j < 18; j++) { 1660 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1661 snprintf(name_str, sizeof(name_str), 1662 "tx_pkts_q_%02d", (j+1)); 1663 1664 SYSCTL_ADD_QUAD(ctx, node_children, 1665 OID_AUTO, name_str, CTLFLAG_RD, 1666 &ha->fp_array[i].tx_pkts_q[j], name_str); 1667 } 1668 #endif 1669 1670 SYSCTL_ADD_QUAD(ctx, node_children, 1671 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1672 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1673 "err_tx_nsegs_gt_elem_left"); 1674 1675 SYSCTL_ADD_QUAD(ctx, node_children, 1676 OID_AUTO, "err_tx_dmamap_create", 1677 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1678 "err_tx_dmamap_create"); 1679 1680 SYSCTL_ADD_QUAD(ctx, node_children, 1681 OID_AUTO, "err_tx_defrag_dmamap_load", 1682 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1683 "err_tx_defrag_dmamap_load"); 1684 1685 SYSCTL_ADD_QUAD(ctx, node_children, 1686 OID_AUTO, "err_tx_non_tso_max_seg", 1687 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1688 "err_tx_non_tso_max_seg"); 1689 1690 SYSCTL_ADD_QUAD(ctx, node_children, 1691 OID_AUTO, "err_tx_dmamap_load", 1692 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1693 "err_tx_dmamap_load"); 1694 1695 SYSCTL_ADD_QUAD(ctx, node_children, 1696 OID_AUTO, "err_tx_defrag", 1697 
CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1698 "err_tx_defrag"); 1699 1700 SYSCTL_ADD_QUAD(ctx, node_children, 1701 OID_AUTO, "err_tx_free_pkt_null", 1702 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1703 "err_tx_free_pkt_null"); 1704 1705 SYSCTL_ADD_QUAD(ctx, node_children, 1706 OID_AUTO, "err_tx_cons_idx_conflict", 1707 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1708 "err_tx_cons_idx_conflict"); 1709 1710 SYSCTL_ADD_QUAD(ctx, node_children, 1711 OID_AUTO, "lro_cnt_64", 1712 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1713 "lro_cnt_64"); 1714 1715 SYSCTL_ADD_QUAD(ctx, node_children, 1716 OID_AUTO, "lro_cnt_128", 1717 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1718 "lro_cnt_128"); 1719 1720 SYSCTL_ADD_QUAD(ctx, node_children, 1721 OID_AUTO, "lro_cnt_256", 1722 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1723 "lro_cnt_256"); 1724 1725 SYSCTL_ADD_QUAD(ctx, node_children, 1726 OID_AUTO, "lro_cnt_512", 1727 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1728 "lro_cnt_512"); 1729 1730 SYSCTL_ADD_QUAD(ctx, node_children, 1731 OID_AUTO, "lro_cnt_1024", 1732 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1733 "lro_cnt_1024"); 1734 1735 /* Rx Related */ 1736 1737 SYSCTL_ADD_QUAD(ctx, node_children, 1738 OID_AUTO, "rx_pkts", 1739 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1740 "No. of received packets"); 1741 1742 SYSCTL_ADD_QUAD(ctx, node_children, 1743 OID_AUTO, "tpa_start", 1744 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1745 "No. of tpa_start packets"); 1746 1747 SYSCTL_ADD_QUAD(ctx, node_children, 1748 OID_AUTO, "tpa_cont", 1749 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1750 "No. of tpa_cont packets"); 1751 1752 SYSCTL_ADD_QUAD(ctx, node_children, 1753 OID_AUTO, "tpa_end", 1754 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1755 "No. of tpa_end packets"); 1756 1757 SYSCTL_ADD_QUAD(ctx, node_children, 1758 OID_AUTO, "err_m_getcl", 1759 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1760 "err_m_getcl"); 1761 1762 SYSCTL_ADD_QUAD(ctx, node_children, 1763 OID_AUTO, "err_m_getjcl", 1764 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1765 "err_m_getjcl"); 1766 1767 SYSCTL_ADD_QUAD(ctx, node_children, 1768 OID_AUTO, "err_rx_hw_errors", 1769 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1770 "err_rx_hw_errors"); 1771 1772 SYSCTL_ADD_QUAD(ctx, node_children, 1773 OID_AUTO, "err_rx_alloc_errors", 1774 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1775 "err_rx_alloc_errors"); 1776 } 1777 1778 return; 1779 } 1780 1781 static void 1782 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1783 { 1784 struct sysctl_ctx_list *ctx; 1785 struct sysctl_oid_list *children; 1786 struct sysctl_oid *ctx_oid; 1787 1788 ctx = device_get_sysctl_ctx(ha->pci_dev); 1789 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1790 1791 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1792 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); 1793 children = SYSCTL_CHILDREN(ctx_oid); 1794 1795 SYSCTL_ADD_QUAD(ctx, children, 1796 OID_AUTO, "no_buff_discards", 1797 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1798 "No. of packets discarded due to lack of buffer"); 1799 1800 SYSCTL_ADD_QUAD(ctx, children, 1801 OID_AUTO, "packet_too_big_discard", 1802 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1803 "No. 
of packets discarded because packet was too big"); 1804 1805 SYSCTL_ADD_QUAD(ctx, children, 1806 OID_AUTO, "ttl0_discard", 1807 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1808 "ttl0_discard"); 1809 1810 SYSCTL_ADD_QUAD(ctx, children, 1811 OID_AUTO, "rx_ucast_bytes", 1812 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1813 "rx_ucast_bytes"); 1814 1815 SYSCTL_ADD_QUAD(ctx, children, 1816 OID_AUTO, "rx_mcast_bytes", 1817 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1818 "rx_mcast_bytes"); 1819 1820 SYSCTL_ADD_QUAD(ctx, children, 1821 OID_AUTO, "rx_bcast_bytes", 1822 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1823 "rx_bcast_bytes"); 1824 1825 SYSCTL_ADD_QUAD(ctx, children, 1826 OID_AUTO, "rx_ucast_pkts", 1827 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1828 "rx_ucast_pkts"); 1829 1830 SYSCTL_ADD_QUAD(ctx, children, 1831 OID_AUTO, "rx_mcast_pkts", 1832 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1833 "rx_mcast_pkts"); 1834 1835 SYSCTL_ADD_QUAD(ctx, children, 1836 OID_AUTO, "rx_bcast_pkts", 1837 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1838 "rx_bcast_pkts"); 1839 1840 SYSCTL_ADD_QUAD(ctx, children, 1841 OID_AUTO, "mftag_filter_discards", 1842 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1843 "mftag_filter_discards"); 1844 1845 SYSCTL_ADD_QUAD(ctx, children, 1846 OID_AUTO, "mac_filter_discards", 1847 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1848 "mac_filter_discards"); 1849 1850 SYSCTL_ADD_QUAD(ctx, children, 1851 OID_AUTO, "tx_ucast_bytes", 1852 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1853 "tx_ucast_bytes"); 1854 1855 SYSCTL_ADD_QUAD(ctx, children, 1856 OID_AUTO, "tx_mcast_bytes", 1857 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1858 "tx_mcast_bytes"); 1859 1860 SYSCTL_ADD_QUAD(ctx, children, 1861 OID_AUTO, "tx_bcast_bytes", 1862 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1863 "tx_bcast_bytes"); 1864 1865 SYSCTL_ADD_QUAD(ctx, children, 1866 OID_AUTO, "tx_ucast_pkts", 1867 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1868 "tx_ucast_pkts"); 1869 1870 SYSCTL_ADD_QUAD(ctx, children, 1871 OID_AUTO, "tx_mcast_pkts", 1872 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1873 "tx_mcast_pkts"); 1874 1875 SYSCTL_ADD_QUAD(ctx, children, 1876 OID_AUTO, "tx_bcast_pkts", 1877 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1878 "tx_bcast_pkts"); 1879 1880 SYSCTL_ADD_QUAD(ctx, children, 1881 OID_AUTO, "tx_err_drop_pkts", 1882 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1883 "tx_err_drop_pkts"); 1884 1885 SYSCTL_ADD_QUAD(ctx, children, 1886 OID_AUTO, "tpa_coalesced_pkts", 1887 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1888 "tpa_coalesced_pkts"); 1889 1890 SYSCTL_ADD_QUAD(ctx, children, 1891 OID_AUTO, "tpa_coalesced_events", 1892 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1893 "tpa_coalesced_events"); 1894 1895 SYSCTL_ADD_QUAD(ctx, children, 1896 OID_AUTO, "tpa_aborts_num", 1897 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1898 "tpa_aborts_num"); 1899 1900 SYSCTL_ADD_QUAD(ctx, children, 1901 OID_AUTO, "tpa_not_coalesced_pkts", 1902 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1903 "tpa_not_coalesced_pkts"); 1904 1905 SYSCTL_ADD_QUAD(ctx, children, 1906 OID_AUTO, "tpa_coalesced_bytes", 1907 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1908 "tpa_coalesced_bytes"); 1909 1910 SYSCTL_ADD_QUAD(ctx, children, 1911 OID_AUTO, "rx_64_byte_packets", 1912 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1913 "rx_64_byte_packets"); 1914 1915 SYSCTL_ADD_QUAD(ctx, children, 1916 OID_AUTO, 
"rx_65_to_127_byte_packets", 1917 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1918 "rx_65_to_127_byte_packets"); 1919 1920 SYSCTL_ADD_QUAD(ctx, children, 1921 OID_AUTO, "rx_128_to_255_byte_packets", 1922 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1923 "rx_128_to_255_byte_packets"); 1924 1925 SYSCTL_ADD_QUAD(ctx, children, 1926 OID_AUTO, "rx_256_to_511_byte_packets", 1927 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1928 "rx_256_to_511_byte_packets"); 1929 1930 SYSCTL_ADD_QUAD(ctx, children, 1931 OID_AUTO, "rx_512_to_1023_byte_packets", 1932 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1933 "rx_512_to_1023_byte_packets"); 1934 1935 SYSCTL_ADD_QUAD(ctx, children, 1936 OID_AUTO, "rx_1024_to_1518_byte_packets", 1937 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1938 "rx_1024_to_1518_byte_packets"); 1939 1940 SYSCTL_ADD_QUAD(ctx, children, 1941 OID_AUTO, "rx_1519_to_1522_byte_packets", 1942 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1943 "rx_1519_to_1522_byte_packets"); 1944 1945 SYSCTL_ADD_QUAD(ctx, children, 1946 OID_AUTO, "rx_1523_to_2047_byte_packets", 1947 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1948 "rx_1523_to_2047_byte_packets"); 1949 1950 SYSCTL_ADD_QUAD(ctx, children, 1951 OID_AUTO, "rx_2048_to_4095_byte_packets", 1952 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1953 "rx_2048_to_4095_byte_packets"); 1954 1955 SYSCTL_ADD_QUAD(ctx, children, 1956 OID_AUTO, "rx_4096_to_9216_byte_packets", 1957 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1958 "rx_4096_to_9216_byte_packets"); 1959 1960 SYSCTL_ADD_QUAD(ctx, children, 1961 OID_AUTO, "rx_9217_to_16383_byte_packets", 1962 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1963 "rx_9217_to_16383_byte_packets"); 1964 1965 SYSCTL_ADD_QUAD(ctx, children, 1966 OID_AUTO, "rx_crc_errors", 1967 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1968 "rx_crc_errors"); 1969 1970 SYSCTL_ADD_QUAD(ctx, children, 1971 OID_AUTO, "rx_mac_crtl_frames", 1972 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1973 "rx_mac_crtl_frames"); 1974 1975 SYSCTL_ADD_QUAD(ctx, children, 1976 OID_AUTO, "rx_pause_frames", 1977 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1978 "rx_pause_frames"); 1979 1980 SYSCTL_ADD_QUAD(ctx, children, 1981 OID_AUTO, "rx_pfc_frames", 1982 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1983 "rx_pfc_frames"); 1984 1985 SYSCTL_ADD_QUAD(ctx, children, 1986 OID_AUTO, "rx_align_errors", 1987 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1988 "rx_align_errors"); 1989 1990 SYSCTL_ADD_QUAD(ctx, children, 1991 OID_AUTO, "rx_carrier_errors", 1992 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 1993 "rx_carrier_errors"); 1994 1995 SYSCTL_ADD_QUAD(ctx, children, 1996 OID_AUTO, "rx_oversize_packets", 1997 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 1998 "rx_oversize_packets"); 1999 2000 SYSCTL_ADD_QUAD(ctx, children, 2001 OID_AUTO, "rx_jabbers", 2002 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2003 "rx_jabbers"); 2004 2005 SYSCTL_ADD_QUAD(ctx, children, 2006 OID_AUTO, "rx_undersize_packets", 2007 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2008 "rx_undersize_packets"); 2009 2010 SYSCTL_ADD_QUAD(ctx, children, 2011 OID_AUTO, "rx_fragments", 2012 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2013 "rx_fragments"); 2014 2015 SYSCTL_ADD_QUAD(ctx, children, 2016 OID_AUTO, "tx_64_byte_packets", 2017 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2018 
"tx_64_byte_packets"); 2019 2020 SYSCTL_ADD_QUAD(ctx, children, 2021 OID_AUTO, "tx_65_to_127_byte_packets", 2022 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2023 "tx_65_to_127_byte_packets"); 2024 2025 SYSCTL_ADD_QUAD(ctx, children, 2026 OID_AUTO, "tx_128_to_255_byte_packets", 2027 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2028 "tx_128_to_255_byte_packets"); 2029 2030 SYSCTL_ADD_QUAD(ctx, children, 2031 OID_AUTO, "tx_256_to_511_byte_packets", 2032 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2033 "tx_256_to_511_byte_packets"); 2034 2035 SYSCTL_ADD_QUAD(ctx, children, 2036 OID_AUTO, "tx_512_to_1023_byte_packets", 2037 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2038 "tx_512_to_1023_byte_packets"); 2039 2040 SYSCTL_ADD_QUAD(ctx, children, 2041 OID_AUTO, "tx_1024_to_1518_byte_packets", 2042 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2043 "tx_1024_to_1518_byte_packets"); 2044 2045 SYSCTL_ADD_QUAD(ctx, children, 2046 OID_AUTO, "tx_1519_to_2047_byte_packets", 2047 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2048 "tx_1519_to_2047_byte_packets"); 2049 2050 SYSCTL_ADD_QUAD(ctx, children, 2051 OID_AUTO, "tx_2048_to_4095_byte_packets", 2052 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2053 "tx_2048_to_4095_byte_packets"); 2054 2055 SYSCTL_ADD_QUAD(ctx, children, 2056 OID_AUTO, "tx_4096_to_9216_byte_packets", 2057 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2058 "tx_4096_to_9216_byte_packets"); 2059 2060 SYSCTL_ADD_QUAD(ctx, children, 2061 OID_AUTO, "tx_9217_to_16383_byte_packets", 2062 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2063 "tx_9217_to_16383_byte_packets"); 2064 2065 SYSCTL_ADD_QUAD(ctx, children, 2066 OID_AUTO, "tx_pause_frames", 2067 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2068 "tx_pause_frames"); 2069 2070 SYSCTL_ADD_QUAD(ctx, children, 2071 OID_AUTO, "tx_pfc_frames", 2072 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2073 "tx_pfc_frames"); 2074 2075 SYSCTL_ADD_QUAD(ctx, children, 2076 OID_AUTO, "tx_lpi_entry_count", 2077 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2078 "tx_lpi_entry_count"); 2079 2080 SYSCTL_ADD_QUAD(ctx, children, 2081 OID_AUTO, "tx_total_collisions", 2082 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2083 "tx_total_collisions"); 2084 2085 SYSCTL_ADD_QUAD(ctx, children, 2086 OID_AUTO, "brb_truncates", 2087 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2088 "brb_truncates"); 2089 2090 SYSCTL_ADD_QUAD(ctx, children, 2091 OID_AUTO, "brb_discards", 2092 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2093 "brb_discards"); 2094 2095 SYSCTL_ADD_QUAD(ctx, children, 2096 OID_AUTO, "rx_mac_bytes", 2097 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2098 "rx_mac_bytes"); 2099 2100 SYSCTL_ADD_QUAD(ctx, children, 2101 OID_AUTO, "rx_mac_uc_packets", 2102 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2103 "rx_mac_uc_packets"); 2104 2105 SYSCTL_ADD_QUAD(ctx, children, 2106 OID_AUTO, "rx_mac_mc_packets", 2107 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2108 "rx_mac_mc_packets"); 2109 2110 SYSCTL_ADD_QUAD(ctx, children, 2111 OID_AUTO, "rx_mac_bc_packets", 2112 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2113 "rx_mac_bc_packets"); 2114 2115 SYSCTL_ADD_QUAD(ctx, children, 2116 OID_AUTO, "rx_mac_frames_ok", 2117 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2118 "rx_mac_frames_ok"); 2119 2120 SYSCTL_ADD_QUAD(ctx, children, 2121 OID_AUTO, "tx_mac_bytes", 2122 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2123 "tx_mac_bytes"); 2124 2125 SYSCTL_ADD_QUAD(ctx, children, 2126 OID_AUTO, "tx_mac_uc_packets", 2127 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2128 "tx_mac_uc_packets"); 2129 2130 SYSCTL_ADD_QUAD(ctx, children, 2131 OID_AUTO, "tx_mac_mc_packets", 2132 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2133 "tx_mac_mc_packets"); 2134 2135 SYSCTL_ADD_QUAD(ctx, children, 2136 OID_AUTO, "tx_mac_bc_packets", 2137 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2138 "tx_mac_bc_packets"); 2139 2140 SYSCTL_ADD_QUAD(ctx, children, 2141 OID_AUTO, "tx_mac_ctrl_frames", 2142 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2143 "tx_mac_ctrl_frames"); 2144 return; 2145 } 2146 2147 static void 2148 qlnx_add_sysctls(qlnx_host_t *ha) 2149 { 2150 device_t dev = ha->pci_dev; 2151 struct sysctl_ctx_list *ctx; 2152 struct sysctl_oid_list *children; 2153 2154 ctx = device_get_sysctl_ctx(dev); 2155 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2156 2157 qlnx_add_fp_stats_sysctls(ha); 2158 qlnx_add_sp_stats_sysctls(ha); 2159 2160 if (qlnx_vf_device(ha) != 0) 2161 qlnx_add_hw_stats_sysctls(ha); 2162 2163 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2164 CTLFLAG_RD, qlnx_ver_str, 0, 2165 "Driver Version"); 2166 2167 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2168 CTLFLAG_RD, ha->stormfw_ver, 0, 2169 "STORM Firmware Version"); 2170 2171 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2172 CTLFLAG_RD, ha->mfw_ver, 0, 2173 "Management Firmware Version"); 2174 2175 SYSCTL_ADD_UINT(ctx, children, 2176 OID_AUTO, "personality", CTLFLAG_RD, 2177 &ha->personality, ha->personality, 2178 "\tpersonality = 0 => Ethernet Only\n" 2179 "\tpersonality = 3 => Ethernet and RoCE\n" 2180 "\tpersonality = 4 => Ethernet and iWARP\n" 2181 "\tpersonality = 6 => Default in Shared Memory\n"); 2182 2183 ha->dbg_level = 0; 2184 SYSCTL_ADD_UINT(ctx, children, 2185 OID_AUTO, "debug", CTLFLAG_RW, 2186 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2187 2188 ha->dp_level = 0x01; 2189 SYSCTL_ADD_UINT(ctx, children, 2190 OID_AUTO, "dp_level", CTLFLAG_RW, 2191 &ha->dp_level, ha->dp_level, "DP Level"); 2192 2193 ha->dbg_trace_lro_cnt = 0; 2194 SYSCTL_ADD_UINT(ctx, children, 2195 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2196 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2197 "Trace LRO Counts"); 2198 2199 ha->dbg_trace_tso_pkt_len = 0; 2200 SYSCTL_ADD_UINT(ctx, children, 2201 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2202 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2203 "Trace TSO packet lengths"); 2204 2205 ha->dp_module = 0; 2206 SYSCTL_ADD_UINT(ctx, children, 2207 OID_AUTO, "dp_module", CTLFLAG_RW, 2208 &ha->dp_module, ha->dp_module, "DP Module"); 2209 2210 ha->err_inject = 0; 2211 2212 SYSCTL_ADD_UINT(ctx, children, 2213 OID_AUTO, "err_inject", CTLFLAG_RW, 2214 &ha->err_inject, ha->err_inject, "Error Inject"); 2215 2216 ha->storm_stats_enable = 0; 2217 2218 SYSCTL_ADD_UINT(ctx, children, 2219 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2220 &ha->storm_stats_enable, ha->storm_stats_enable, 2221 "Enable Storm Statistics Gathering"); 2222 2223 ha->storm_stats_index = 0; 2224 2225 SYSCTL_ADD_UINT(ctx, children, 2226 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2227 &ha->storm_stats_index, ha->storm_stats_index, 2228 "Enable Storm Statistics Gathering Current Index"); 2229 2230 ha->grcdump_taken = 0; 2231 SYSCTL_ADD_UINT(ctx, children, 2232 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2233 &ha->grcdump_taken, ha->grcdump_taken, 
2234 "grcdump_taken"); 2235 2236 ha->idle_chk_taken = 0; 2237 SYSCTL_ADD_UINT(ctx, children, 2238 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2239 &ha->idle_chk_taken, ha->idle_chk_taken, 2240 "idle_chk_taken"); 2241 2242 SYSCTL_ADD_UINT(ctx, children, 2243 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2244 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2245 "rx_coalesce_usecs"); 2246 2247 SYSCTL_ADD_UINT(ctx, children, 2248 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2249 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2250 "tx_coalesce_usecs"); 2251 2252 SYSCTL_ADD_PROC(ctx, children, 2253 OID_AUTO, "trigger_dump", 2254 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2255 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2256 2257 SYSCTL_ADD_PROC(ctx, children, 2258 OID_AUTO, "set_rx_coalesce_usecs", 2259 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2260 (void *)ha, 0, qlnx_set_rx_coalesce, "I", 2261 "rx interrupt coalesce period microseconds"); 2262 2263 SYSCTL_ADD_PROC(ctx, children, 2264 OID_AUTO, "set_tx_coalesce_usecs", 2265 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2266 (void *)ha, 0, qlnx_set_tx_coalesce, "I", 2267 "tx interrupt coalesce period microseconds"); 2268 2269 ha->rx_pkt_threshold = 128; 2270 SYSCTL_ADD_UINT(ctx, children, 2271 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2272 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2273 "No. of Rx Pkts to process at a time"); 2274 2275 ha->rx_jumbo_buf_eq_mtu = 0; 2276 SYSCTL_ADD_UINT(ctx, children, 2277 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2278 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2279 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2280 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2281 2282 SYSCTL_ADD_QUAD(ctx, children, 2283 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2284 &ha->err_illegal_intr, "err_illegal_intr"); 2285 2286 SYSCTL_ADD_QUAD(ctx, children, 2287 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2288 &ha->err_fp_null, "err_fp_null"); 2289 2290 SYSCTL_ADD_QUAD(ctx, children, 2291 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2292 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2293 return; 2294 } 2295 2296 /***************************************************************************** 2297 * Operating System Network Interface Functions 2298 *****************************************************************************/ 2299 2300 static void 2301 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2302 { 2303 uint16_t device_id; 2304 if_t ifp; 2305 2306 ifp = ha->ifp = if_alloc(IFT_ETHER); 2307 2308 if (ifp == NULL) 2309 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2310 2311 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2312 2313 device_id = pci_get_device(ha->pci_dev); 2314 2315 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2316 if_setbaudrate(ifp, IF_Gbps(40)); 2317 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2318 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2319 if_setbaudrate(ifp, IF_Gbps(25)); 2320 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2321 if_setbaudrate(ifp, IF_Gbps(50)); 2322 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2323 if_setbaudrate(ifp, IF_Gbps(100)); 2324 2325 if_setcapabilities(ifp, IFCAP_LINKSTATE); 2326 2327 if_setinitfn(ifp, qlnx_init); 2328 if_setsoftc(ifp, ha); 2329 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 2330 if_setioctlfn(ifp, qlnx_ioctl); 2331 if_settransmitfn(ifp, qlnx_transmit); 2332 if_setqflushfn(ifp, qlnx_qflush); 2333 2334 if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha)); 
2335 if_setsendqready(ifp); 2336 2337 if_setgetcounterfn(ifp, qlnx_get_counter); 2338 2339 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 2340 2341 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2342 2343 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2344 !ha->primary_mac[2] && !ha->primary_mac[3] && 2345 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2346 uint32_t rnd; 2347 2348 rnd = arc4random(); 2349 2350 ha->primary_mac[0] = 0x00; 2351 ha->primary_mac[1] = 0x0e; 2352 ha->primary_mac[2] = 0x1e; 2353 ha->primary_mac[3] = rnd & 0xFF; 2354 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2355 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2356 } 2357 2358 ether_ifattach(ifp, ha->primary_mac); 2359 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2360 2361 if_setcapabilities(ifp, IFCAP_HWCSUM); 2362 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); 2363 2364 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 2365 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); 2366 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); 2367 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0); 2368 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); 2369 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); 2370 if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0); 2371 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); 2372 2373 if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE - 2374 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); 2375 if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */ 2376 if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE); 2377 2378 if_setcapenable(ifp, if_getcapabilities(ifp)); 2379 2380 if_sethwassist(ifp, CSUM_IP); 2381 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); 2382 if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0); 2383 if_sethwassistbits(ifp, CSUM_TSO, 0); 2384 2385 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 2386 2387 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2388 qlnx_media_status); 2389 2390 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2391 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2392 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2393 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2394 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2395 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2396 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2397 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2398 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2399 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2400 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2401 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2402 ifmedia_add(&ha->media, 2403 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2404 ifmedia_add(&ha->media, 2405 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2406 ifmedia_add(&ha->media, 2407 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2408 } 2409 2410 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2411 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2412 2413 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2414 2415 QL_DPRINT2(ha, "exit\n"); 2416 2417 return; 2418 } 2419 2420 static void 2421 qlnx_init_locked(qlnx_host_t *ha) 2422 { 2423 if_t ifp = ha->ifp; 2424 2425 QL_DPRINT1(ha, "Driver Initialization start \n"); 2426 2427 qlnx_stop(ha); 2428 2429 if (qlnx_load(ha) == 0) { 2430 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 2431 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2432 2433 #ifdef QLNX_ENABLE_IWARP 2434 if 
(qlnx_vf_device(ha) != 0) { 2435 qlnx_rdma_dev_open(ha); 2436 } 2437 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2438 } 2439 2440 return; 2441 } 2442 2443 static void 2444 qlnx_init(void *arg) 2445 { 2446 qlnx_host_t *ha; 2447 2448 ha = (qlnx_host_t *)arg; 2449 2450 QL_DPRINT2(ha, "enter\n"); 2451 2452 QLNX_LOCK(ha); 2453 qlnx_init_locked(ha); 2454 QLNX_UNLOCK(ha); 2455 2456 QL_DPRINT2(ha, "exit\n"); 2457 2458 return; 2459 } 2460 2461 static int 2462 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2463 { 2464 struct ecore_filter_mcast *mcast; 2465 struct ecore_dev *cdev; 2466 int rc; 2467 2468 cdev = &ha->cdev; 2469 2470 mcast = &ha->ecore_mcast; 2471 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2472 2473 if (add_mac) 2474 mcast->opcode = ECORE_FILTER_ADD; 2475 else 2476 mcast->opcode = ECORE_FILTER_REMOVE; 2477 2478 mcast->num_mc_addrs = 1; 2479 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2480 2481 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2482 2483 return (rc); 2484 } 2485 2486 static int 2487 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2488 { 2489 int i; 2490 2491 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2492 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2493 return 0; /* it has already been added */ 2494 } 2495 2496 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2497 if ((ha->mcast[i].addr[0] == 0) && 2498 (ha->mcast[i].addr[1] == 0) && 2499 (ha->mcast[i].addr[2] == 0) && 2500 (ha->mcast[i].addr[3] == 0) && 2501 (ha->mcast[i].addr[4] == 0) && 2502 (ha->mcast[i].addr[5] == 0)) { 2503 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2504 return (-1); 2505 2506 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2507 ha->nmcast++; 2508 2509 return 0; 2510 } 2511 } 2512 return 0; 2513 } 2514 2515 static int 2516 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2517 { 2518 int i; 2519 2520 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2521 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2522 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2523 return (-1); 2524 2525 ha->mcast[i].addr[0] = 0; 2526 ha->mcast[i].addr[1] = 0; 2527 ha->mcast[i].addr[2] = 0; 2528 ha->mcast[i].addr[3] = 0; 2529 ha->mcast[i].addr[4] = 0; 2530 ha->mcast[i].addr[5] = 0; 2531 2532 ha->nmcast--; 2533 2534 return 0; 2535 } 2536 } 2537 return 0; 2538 } 2539 2540 /* 2541 * Name: qlnx_hw_set_multi 2542 * Function: Sets the Multicast Addresses provided by the host O.S. into the 2543 * hardware (for the given interface) 2544 */ 2545 static void 2546 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2547 uint32_t add_mac) 2548 { 2549 int i; 2550 2551 for (i = 0; i < mcnt; i++) { 2552 if (add_mac) { 2553 if (qlnx_hw_add_mcast(ha, mta)) 2554 break; 2555 } else { 2556 if (qlnx_hw_del_mcast(ha, mta)) 2557 break; 2558 } 2559 2560 mta += ETHER_HDR_LEN; 2561 } 2562 return; 2563 } 2564 2565 static u_int 2566 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 2567 { 2568 uint8_t *mta = arg; 2569 2570 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2571 return (0); 2572 2573 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2574 2575 return (1); 2576 } 2577 2578 static int 2579 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2580 { 2581 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; 2582 if_t ifp = ha->ifp; 2583 u_int mcnt; 2584 2585 if (qlnx_vf_device(ha) == 0) 2586 return (0); 2587 2588 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); 2589 2590 QLNX_LOCK(ha); 2591 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2592
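/*
 * Both the shadow table in ha->mcast[] and the hardware multicast filter
 * were updated under the driver lock above, so the two cannot drift apart.
 */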
QLNX_UNLOCK(ha); 2593 2594 return (0); 2595 } 2596 2597 static int 2598 qlnx_set_promisc(qlnx_host_t *ha) 2599 { 2600 int rc = 0; 2601 uint8_t filter; 2602 2603 if (qlnx_vf_device(ha) == 0) 2604 return (0); 2605 2606 filter = ha->filter; 2607 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2608 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2609 2610 rc = qlnx_set_rx_accept_filter(ha, filter); 2611 return (rc); 2612 } 2613 2614 static int 2615 qlnx_set_allmulti(qlnx_host_t *ha) 2616 { 2617 int rc = 0; 2618 uint8_t filter; 2619 2620 if (qlnx_vf_device(ha) == 0) 2621 return (0); 2622 2623 filter = ha->filter; 2624 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2625 rc = qlnx_set_rx_accept_filter(ha, filter); 2626 2627 return (rc); 2628 } 2629 2630 static int 2631 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data) 2632 { 2633 int ret = 0, mask; 2634 struct ifreq *ifr = (struct ifreq *)data; 2635 #ifdef INET 2636 struct ifaddr *ifa = (struct ifaddr *)data; 2637 #endif 2638 qlnx_host_t *ha; 2639 2640 ha = (qlnx_host_t *)if_getsoftc(ifp); 2641 2642 switch (cmd) { 2643 case SIOCSIFADDR: 2644 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2645 2646 #ifdef INET 2647 if (ifa->ifa_addr->sa_family == AF_INET) { 2648 if_setflagbits(ifp, IFF_UP, 0); 2649 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 2650 QLNX_LOCK(ha); 2651 qlnx_init_locked(ha); 2652 QLNX_UNLOCK(ha); 2653 } 2654 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2655 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2656 2657 arp_ifinit(ifp, ifa); 2658 break; 2659 } 2660 #endif 2661 ether_ioctl(ifp, cmd, data); 2662 break; 2663 2664 case SIOCSIFMTU: 2665 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2666 2667 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2668 ret = EINVAL; 2669 } else { 2670 QLNX_LOCK(ha); 2671 if_setmtu(ifp, ifr->ifr_mtu); 2672 ha->max_frame_size = 2673 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 2674 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2675 qlnx_init_locked(ha); 2676 } 2677 2678 QLNX_UNLOCK(ha); 2679 } 2680 2681 break; 2682 2683 case SIOCSIFFLAGS: 2684 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2685 2686 QLNX_LOCK(ha); 2687 2688 if (if_getflags(ifp) & IFF_UP) { 2689 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2690 if ((if_getflags(ifp) ^ ha->if_flags) & 2691 IFF_PROMISC) { 2692 ret = qlnx_set_promisc(ha); 2693 } else if ((if_getflags(ifp) ^ ha->if_flags) & 2694 IFF_ALLMULTI) { 2695 ret = qlnx_set_allmulti(ha); 2696 } 2697 } else { 2698 ha->max_frame_size = if_getmtu(ifp) + 2699 ETHER_HDR_LEN + ETHER_CRC_LEN; 2700 qlnx_init_locked(ha); 2701 } 2702 } else { 2703 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2704 qlnx_stop(ha); 2705 ha->if_flags = if_getflags(ifp); 2706 } 2707 2708 QLNX_UNLOCK(ha); 2709 break; 2710 2711 case SIOCADDMULTI: 2712 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2713 2714 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2715 if (qlnx_set_multi(ha, 1)) 2716 ret = EINVAL; 2717 } 2718 break; 2719 2720 case SIOCDELMULTI: 2721 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2722 2723 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2724 if (qlnx_set_multi(ha, 0)) 2725 ret = EINVAL; 2726 } 2727 break; 2728 2729 case SIOCSIFMEDIA: 2730 case SIOCGIFMEDIA: 2731 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2732 2733 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2734 break; 2735 2736 case SIOCSIFCAP: 2737 2738 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 2739 2740 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2741 2742 if (mask & IFCAP_HWCSUM) 2743 if_togglecapenable(ifp, IFCAP_HWCSUM); 2744 if (mask & 
IFCAP_TSO4) 2745 if_togglecapenable(ifp, IFCAP_TSO4); 2746 if (mask & IFCAP_TSO6) 2747 if_togglecapenable(ifp, IFCAP_TSO6); 2748 if (mask & IFCAP_VLAN_HWTAGGING) 2749 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2750 if (mask & IFCAP_VLAN_HWTSO) 2751 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 2752 if (mask & IFCAP_LRO) 2753 if_togglecapenable(ifp, IFCAP_LRO); 2754 2755 QLNX_LOCK(ha); 2756 2757 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2758 qlnx_init_locked(ha); 2759 2760 QLNX_UNLOCK(ha); 2761 2762 VLAN_CAPABILITIES(ifp); 2763 break; 2764 2765 case SIOCGI2C: 2766 { 2767 struct ifi2creq i2c; 2768 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2769 struct ecore_ptt *p_ptt; 2770 2771 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2772 2773 if (ret) 2774 break; 2775 2776 if ((i2c.len > sizeof (i2c.data)) || 2777 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2778 ret = EINVAL; 2779 break; 2780 } 2781 2782 p_ptt = ecore_ptt_acquire(p_hwfn); 2783 2784 if (!p_ptt) { 2785 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2786 ret = -1; 2787 break; 2788 } 2789 2790 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2791 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2792 i2c.len, &i2c.data[0]); 2793 2794 ecore_ptt_release(p_hwfn, p_ptt); 2795 2796 if (ret) { 2797 ret = -1; 2798 break; 2799 } 2800 2801 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2802 2803 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2804 len = %d addr = 0x%02x offset = 0x%04x \ 2805 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2806 0x%02x 0x%02x 0x%02x\n", 2807 ret, i2c.len, i2c.dev_addr, i2c.offset, 2808 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2809 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2810 break; 2811 } 2812 2813 default: 2814 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2815 ret = ether_ioctl(ifp, cmd, data); 2816 break; 2817 } 2818 2819 return (ret); 2820 } 2821 2822 static int 2823 qlnx_media_change(if_t ifp) 2824 { 2825 qlnx_host_t *ha; 2826 struct ifmedia *ifm; 2827 int ret = 0; 2828 2829 ha = (qlnx_host_t *)if_getsoftc(ifp); 2830 2831 QL_DPRINT2(ha, "enter\n"); 2832 2833 ifm = &ha->media; 2834 2835 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2836 ret = EINVAL; 2837 2838 QL_DPRINT2(ha, "exit\n"); 2839 2840 return (ret); 2841 } 2842 2843 static void 2844 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr) 2845 { 2846 qlnx_host_t *ha; 2847 2848 ha = (qlnx_host_t *)if_getsoftc(ifp); 2849 2850 QL_DPRINT2(ha, "enter\n"); 2851 2852 ifmr->ifm_status = IFM_AVALID; 2853 ifmr->ifm_active = IFM_ETHER; 2854 2855 if (ha->link_up) { 2856 ifmr->ifm_status |= IFM_ACTIVE; 2857 ifmr->ifm_active |= 2858 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2859 2860 if (ha->if_link.link_partner_caps & 2861 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2862 ifmr->ifm_active |= 2863 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2864 } 2865 2866 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2867 2868 return; 2869 } 2870 2871 static void 2872 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2873 struct qlnx_tx_queue *txq) 2874 { 2875 u16 idx; 2876 struct mbuf *mp; 2877 bus_dmamap_t map; 2878 int i; 2879 // struct eth_tx_bd *tx_data_bd; 2880 struct eth_tx_1st_bd *first_bd; 2881 int nbds = 0; 2882 2883 idx = txq->sw_tx_cons; 2884 mp = txq->sw_tx_ring[idx].mp; 2885 map = txq->sw_tx_ring[idx].map; 2886 2887 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2888 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2889 2890 QL_DPRINT1(ha, "(mp == NULL) " 2891 " tx_idx = 0x%x" 2892 " ecore_prod_idx = 0x%x" 2893 " ecore_cons_idx = 0x%x" 2894 " hw_bd_cons = 0x%x" 2895 " txq_db_last = 0x%x" 2896 " elem_left = 0x%x\n", 2897 fp->rss_id, 2898 ecore_chain_get_prod_idx(&txq->tx_pbl), 2899 ecore_chain_get_cons_idx(&txq->tx_pbl), 2900 le16toh(*txq->hw_cons_ptr), 2901 txq->tx_db.raw, 2902 ecore_chain_get_elem_left(&txq->tx_pbl)); 2903 2904 fp->err_tx_free_pkt_null++; 2905 2906 //DEBUG 2907 qlnx_trigger_dump(ha); 2908 2909 return; 2910 } else { 2911 QLNX_INC_OPACKETS((ha->ifp)); 2912 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2913 2914 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2915 bus_dmamap_unload(ha->tx_tag, map); 2916 2917 fp->tx_pkts_freed++; 2918 fp->tx_pkts_completed++; 2919 2920 m_freem(mp); 2921 } 2922 2923 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2924 nbds = first_bd->data.nbds; 2925 2926 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2927 2928 for (i = 1; i < nbds; i++) { 2929 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl); 2930 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2931 } 2932 txq->sw_tx_ring[idx].flags = 0; 2933 txq->sw_tx_ring[idx].mp = NULL; 2934 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2935 2936 return; 2937 } 2938 2939 static void 2940 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2941 struct qlnx_tx_queue *txq) 2942 { 2943 u16 hw_bd_cons; 2944 u16 ecore_cons_idx; 2945 uint16_t diff; 2946 uint16_t idx, idx2; 2947 2948 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2949 2950 while (hw_bd_cons != 2951 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2952 diff = hw_bd_cons - ecore_cons_idx; 2953 if ((diff > TX_RING_SIZE) || 2954 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2955 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2956 2957 QL_DPRINT1(ha, "(diff = 0x%x) " 2958 " tx_idx = 0x%x" 2959 " ecore_prod_idx = 0x%x" 2960 " ecore_cons_idx = 0x%x" 2961 " hw_bd_cons = 0x%x" 2962 " txq_db_last = 0x%x" 2963 " elem_left = 0x%x\n", 2964 diff, 2965 fp->rss_id, 2966 ecore_chain_get_prod_idx(&txq->tx_pbl), 2967 ecore_chain_get_cons_idx(&txq->tx_pbl), 2968 le16toh(*txq->hw_cons_ptr), 2969 txq->tx_db.raw, 2970 ecore_chain_get_elem_left(&txq->tx_pbl)); 2971 2972 fp->err_tx_cons_idx_conflict++; 2973 2974 //DEBUG 2975 qlnx_trigger_dump(ha); 2976 } 2977 2978 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2979 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 2980 prefetch(txq->sw_tx_ring[idx].mp); 2981 prefetch(txq->sw_tx_ring[idx2].mp); 2982 2983 qlnx_free_tx_pkt(ha, fp, txq); 2984 2985 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2986 } 2987 return; 2988 } 2989 2990 static int 2991 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp) 2992 { 2993 int ret = 0; 2994 struct qlnx_tx_queue *txq; 2995 qlnx_host_t * ha; 2996 uint16_t elem_left; 2997 2998 txq = fp->txq[0]; 2999 ha = (qlnx_host_t *)fp->edev; 3000 3001 if 
((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3002 if(mp != NULL) 3003 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3004 return (ret); 3005 } 3006 3007 if(mp != NULL) 3008 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3009 3010 mp = drbr_peek(ifp, fp->tx_br); 3011 3012 while (mp != NULL) { 3013 if (qlnx_send(ha, fp, &mp)) { 3014 if (mp != NULL) { 3015 drbr_putback(ifp, fp->tx_br, mp); 3016 } else { 3017 fp->tx_pkts_processed++; 3018 drbr_advance(ifp, fp->tx_br); 3019 } 3020 goto qlnx_transmit_locked_exit; 3021 3022 } else { 3023 drbr_advance(ifp, fp->tx_br); 3024 fp->tx_pkts_transmitted++; 3025 fp->tx_pkts_processed++; 3026 } 3027 3028 mp = drbr_peek(ifp, fp->tx_br); 3029 } 3030 3031 qlnx_transmit_locked_exit: 3032 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3033 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3034 < QLNX_TX_ELEM_MAX_THRESH)) 3035 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3036 3037 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3038 return ret; 3039 } 3040 3041 static int 3042 qlnx_transmit(if_t ifp, struct mbuf *mp) 3043 { 3044 qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp); 3045 struct qlnx_fastpath *fp; 3046 int rss_id = 0, ret = 0; 3047 3048 #ifdef QLNX_TRACEPERF_DATA 3049 uint64_t tx_pkts = 0, tx_compl = 0; 3050 #endif 3051 3052 QL_DPRINT2(ha, "enter\n"); 3053 3054 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3055 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3056 ha->num_rss; 3057 3058 fp = &ha->fp_array[rss_id]; 3059 3060 if (fp->tx_br == NULL) { 3061 ret = EINVAL; 3062 goto qlnx_transmit_exit; 3063 } 3064 3065 if (mtx_trylock(&fp->tx_mtx)) { 3066 #ifdef QLNX_TRACEPERF_DATA 3067 tx_pkts = fp->tx_pkts_transmitted; 3068 tx_compl = fp->tx_pkts_completed; 3069 #endif 3070 3071 ret = qlnx_transmit_locked(ifp, fp, mp); 3072 3073 #ifdef QLNX_TRACEPERF_DATA 3074 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3075 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3076 #endif 3077 mtx_unlock(&fp->tx_mtx); 3078 } else { 3079 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3080 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3081 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3082 } 3083 } 3084 3085 qlnx_transmit_exit: 3086 3087 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3088 return ret; 3089 } 3090 3091 static void 3092 qlnx_qflush(if_t ifp) 3093 { 3094 int rss_id; 3095 struct qlnx_fastpath *fp; 3096 struct mbuf *mp; 3097 qlnx_host_t *ha; 3098 3099 ha = (qlnx_host_t *)if_getsoftc(ifp); 3100 3101 QL_DPRINT2(ha, "enter\n"); 3102 3103 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3104 fp = &ha->fp_array[rss_id]; 3105 3106 if (fp == NULL) 3107 continue; 3108 3109 if (fp->tx_br) { 3110 mtx_lock(&fp->tx_mtx); 3111 3112 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3113 fp->tx_pkts_freed++; 3114 m_freem(mp); 3115 } 3116 mtx_unlock(&fp->tx_mtx); 3117 } 3118 } 3119 QL_DPRINT2(ha, "exit\n"); 3120 3121 return; 3122 } 3123 3124 static void 3125 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3126 { 3127 uint32_t offset; 3128 3129 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3130 3131 bus_write_4(ha->pci_dbells, offset, value); 3132 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3133 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3134 3135 return; 3136 } 3137 3138 static uint32_t 3139 qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3140 { 3141 struct ether_vlan_header *eh = NULL; 3142 struct ip *ip = NULL; 3143 struct ip6_hdr 
*ip6 = NULL; 3144 struct tcphdr *th = NULL; 3145 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3146 uint16_t etype = 0; 3147 uint8_t buf[sizeof(struct ip6_hdr)]; 3148 3149 eh = mtod(mp, struct ether_vlan_header *); 3150 3151 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3152 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3153 etype = ntohs(eh->evl_proto); 3154 } else { 3155 ehdrlen = ETHER_HDR_LEN; 3156 etype = ntohs(eh->evl_encap_proto); 3157 } 3158 3159 switch (etype) { 3160 case ETHERTYPE_IP: 3161 ip = (struct ip *)(mp->m_data + ehdrlen); 3162 3163 ip_hlen = sizeof (struct ip); 3164 3165 if (mp->m_len < (ehdrlen + ip_hlen)) { 3166 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3167 ip = (struct ip *)buf; 3168 } 3169 3170 th = (struct tcphdr *)(ip + 1); 3171 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3172 break; 3173 3174 case ETHERTYPE_IPV6: 3175 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3176 3177 ip_hlen = sizeof(struct ip6_hdr); 3178 3179 if (mp->m_len < (ehdrlen + ip_hlen)) { 3180 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3181 buf); 3182 ip6 = (struct ip6_hdr *)buf; 3183 } 3184 th = (struct tcphdr *)(ip6 + 1); 3185 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3186 break; 3187 3188 default: 3189 break; 3190 } 3191 3192 return (offset); 3193 } 3194 3195 static __inline int 3196 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3197 uint32_t offset) 3198 { 3199 int i; 3200 uint32_t sum, nbds_in_hdr = 1; 3201 uint32_t window; 3202 bus_dma_segment_t *s_seg; 3203 3204 /* If the header spans multiple segments, skip those segments */ 3205 3206 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3207 return (0); 3208 3209 i = 0; 3210 3211 while ((i < nsegs) && (offset >= segs->ds_len)) { 3212 offset = offset - segs->ds_len; 3213 segs++; 3214 i++; 3215 nbds_in_hdr++; 3216 } 3217 3218 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3219 3220 nsegs = nsegs - i; 3221 3222 while (nsegs >= window) { 3223 sum = 0; 3224 s_seg = segs; 3225 3226 for (i = 0; i < window; i++){ 3227 sum += s_seg->ds_len; 3228 s_seg++; 3229 } 3230 3231 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3232 fp->tx_lso_wnd_min_len++; 3233 return (-1); 3234 } 3235 3236 nsegs = nsegs - 1; 3237 segs++; 3238 } 3239 3240 return (0); 3241 } 3242 3243 static int 3244 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3245 { 3246 bus_dma_segment_t *segs; 3247 bus_dmamap_t map = 0; 3248 uint32_t nsegs = 0; 3249 int ret = -1; 3250 struct mbuf *m_head = *m_headp; 3251 uint16_t idx = 0; 3252 uint16_t elem_left; 3253 3254 uint8_t nbd = 0; 3255 struct qlnx_tx_queue *txq; 3256 3257 struct eth_tx_1st_bd *first_bd; 3258 struct eth_tx_2nd_bd *second_bd; 3259 struct eth_tx_3rd_bd *third_bd; 3260 struct eth_tx_bd *tx_data_bd; 3261 3262 int seg_idx = 0; 3263 uint32_t nbds_in_hdr = 0; 3264 uint32_t offset = 0; 3265 3266 #ifdef QLNX_TRACE_PERF_DATA 3267 uint16_t bd_used; 3268 #endif 3269 3270 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3271 3272 if (!ha->link_up) 3273 return (-1); 3274 3275 first_bd = NULL; 3276 second_bd = NULL; 3277 third_bd = NULL; 3278 tx_data_bd = NULL; 3279 3280 txq = fp->txq[0]; 3281 3282 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3283 QLNX_TX_ELEM_MIN_THRESH) { 3284 fp->tx_nsegs_gt_elem_left++; 3285 fp->err_tx_nsegs_gt_elem_left++; 3286 3287 return (ENOBUFS); 3288 } 3289 3290 idx = txq->sw_tx_prod; 3291 3292 map = txq->sw_tx_ring[idx].map; 3293 segs = txq->segs; 3294 3295 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 
3296 BUS_DMA_NOWAIT); 3297 3298 if (ha->dbg_trace_tso_pkt_len) { 3299 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3300 if (!fp->tx_tso_min_pkt_len) { 3301 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3302 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3303 } else { 3304 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3305 fp->tx_tso_min_pkt_len = 3306 m_head->m_pkthdr.len; 3307 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3308 fp->tx_tso_max_pkt_len = 3309 m_head->m_pkthdr.len; 3310 } 3311 } 3312 } 3313 3314 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3315 offset = qlnx_tcp_offset(ha, m_head); 3316 3317 if ((ret == EFBIG) || 3318 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3319 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3320 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3321 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3322 struct mbuf *m; 3323 3324 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3325 3326 fp->tx_defrag++; 3327 3328 m = m_defrag(m_head, M_NOWAIT); 3329 if (m == NULL) { 3330 fp->err_tx_defrag++; 3331 fp->tx_pkts_freed++; 3332 m_freem(m_head); 3333 *m_headp = NULL; 3334 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3335 return (ENOBUFS); 3336 } 3337 3338 m_head = m; 3339 *m_headp = m_head; 3340 3341 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3342 segs, &nsegs, BUS_DMA_NOWAIT))) { 3343 fp->err_tx_defrag_dmamap_load++; 3344 3345 QL_DPRINT1(ha, 3346 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3347 ret, m_head->m_pkthdr.len); 3348 3349 fp->tx_pkts_freed++; 3350 m_freem(m_head); 3351 *m_headp = NULL; 3352 3353 return (ret); 3354 } 3355 3356 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3357 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3358 fp->err_tx_non_tso_max_seg++; 3359 3360 QL_DPRINT1(ha, 3361 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3362 ret, nsegs, m_head->m_pkthdr.len); 3363 3364 fp->tx_pkts_freed++; 3365 m_freem(m_head); 3366 *m_headp = NULL; 3367 3368 return (ret); 3369 } 3370 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3371 offset = qlnx_tcp_offset(ha, m_head); 3372 3373 } else if (ret) { 3374 fp->err_tx_dmamap_load++; 3375 3376 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3377 ret, m_head->m_pkthdr.len); 3378 fp->tx_pkts_freed++; 3379 m_freem(m_head); 3380 *m_headp = NULL; 3381 return (ret); 3382 } 3383 3384 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3385 3386 if (ha->dbg_trace_tso_pkt_len) { 3387 if (nsegs < QLNX_FP_MAX_SEGS) 3388 fp->tx_pkts[(nsegs - 1)]++; 3389 else 3390 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3391 } 3392 3393 #ifdef QLNX_TRACE_PERF_DATA 3394 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3395 if(m_head->m_pkthdr.len <= 2048) 3396 fp->tx_pkts_hist[0]++; 3397 else if((m_head->m_pkthdr.len > 2048) && 3398 (m_head->m_pkthdr.len <= 4096)) 3399 fp->tx_pkts_hist[1]++; 3400 else if((m_head->m_pkthdr.len > 4096) && 3401 (m_head->m_pkthdr.len <= 8192)) 3402 fp->tx_pkts_hist[2]++; 3403 else if((m_head->m_pkthdr.len > 8192) && 3404 (m_head->m_pkthdr.len <= 12288 )) 3405 fp->tx_pkts_hist[3]++; 3406 else if((m_head->m_pkthdr.len > 11288) && 3407 (m_head->m_pkthdr.len <= 16394)) 3408 fp->tx_pkts_hist[4]++; 3409 else if((m_head->m_pkthdr.len > 16384) && 3410 (m_head->m_pkthdr.len <= 20480)) 3411 fp->tx_pkts_hist[5]++; 3412 else if((m_head->m_pkthdr.len > 20480) && 3413 (m_head->m_pkthdr.len <= 24576)) 3414 fp->tx_pkts_hist[6]++; 3415 else if((m_head->m_pkthdr.len > 24576) && 3416 (m_head->m_pkthdr.len <= 28672)) 3417 fp->tx_pkts_hist[7]++; 3418 else if((m_head->m_pkthdr.len > 28762) && 3419 
(m_head->m_pkthdr.len <= 32768)) 3420 fp->tx_pkts_hist[8]++; 3421 else if((m_head->m_pkthdr.len > 32768) && 3422 (m_head->m_pkthdr.len <= 36864)) 3423 fp->tx_pkts_hist[9]++; 3424 else if((m_head->m_pkthdr.len > 36864) && 3425 (m_head->m_pkthdr.len <= 40960)) 3426 fp->tx_pkts_hist[10]++; 3427 else if((m_head->m_pkthdr.len > 40960) && 3428 (m_head->m_pkthdr.len <= 45056)) 3429 fp->tx_pkts_hist[11]++; 3430 else if((m_head->m_pkthdr.len > 45056) && 3431 (m_head->m_pkthdr.len <= 49152)) 3432 fp->tx_pkts_hist[12]++; 3433 else if((m_head->m_pkthdr.len > 49152) && 3434 (m_head->m_pkthdr.len <= 53248)) 3435 fp->tx_pkts_hist[13]++; 3436 else if((m_head->m_pkthdr.len > 53248) && 3437 (m_head->m_pkthdr.len <= 57344)) 3438 fp->tx_pkts_hist[14]++; 3439 else if((m_head->m_pkthdr.len > 53248) && 3440 (m_head->m_pkthdr.len <= 57344)) 3441 fp->tx_pkts_hist[15]++; 3442 else if((m_head->m_pkthdr.len > 57344) && 3443 (m_head->m_pkthdr.len <= 61440)) 3444 fp->tx_pkts_hist[16]++; 3445 else 3446 fp->tx_pkts_hist[17]++; 3447 } 3448 3449 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3450 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3451 bd_used = TX_RING_SIZE - elem_left; 3452 3453 if(bd_used <= 100) 3454 fp->tx_pkts_q[0]++; 3455 else if((bd_used > 100) && (bd_used <= 500)) 3456 fp->tx_pkts_q[1]++; 3457 else if((bd_used > 500) && (bd_used <= 1000)) 3458 fp->tx_pkts_q[2]++; 3459 else if((bd_used > 1000) && (bd_used <= 2000)) 3460 fp->tx_pkts_q[3]++; 3461 else if((bd_used > 3000) && (bd_used <= 4000)) 3462 fp->tx_pkts_q[4]++; 3463 else if((bd_used > 4000) && (bd_used <= 5000)) 3464 fp->tx_pkts_q[5]++; 3465 else if((bd_used > 6000) && (bd_used <= 7000)) 3466 fp->tx_pkts_q[6]++; 3467 else if((bd_used > 7000) && (bd_used <= 8000)) 3468 fp->tx_pkts_q[7]++; 3469 else if((bd_used > 8000) && (bd_used <= 9000)) 3470 fp->tx_pkts_q[8]++; 3471 else if((bd_used > 9000) && (bd_used <= 10000)) 3472 fp->tx_pkts_q[9]++; 3473 else if((bd_used > 10000) && (bd_used <= 11000)) 3474 fp->tx_pkts_q[10]++; 3475 else if((bd_used > 11000) && (bd_used <= 12000)) 3476 fp->tx_pkts_q[11]++; 3477 else if((bd_used > 12000) && (bd_used <= 13000)) 3478 fp->tx_pkts_q[12]++; 3479 else if((bd_used > 13000) && (bd_used <= 14000)) 3480 fp->tx_pkts_q[13]++; 3481 else if((bd_used > 14000) && (bd_used <= 15000)) 3482 fp->tx_pkts_q[14]++; 3483 else if((bd_used > 15000) && (bd_used <= 16000)) 3484 fp->tx_pkts_q[15]++; 3485 else 3486 fp->tx_pkts_q[16]++; 3487 } 3488 3489 #endif /* end of QLNX_TRACE_PERF_DATA */ 3490 3491 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3492 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3493 QL_DPRINT1(ha, "(%d, 0x%x) insufficient BDs" 3494 " in chain[%d] trying to free packets\n", 3495 nsegs, elem_left, fp->rss_id); 3496 3497 fp->tx_nsegs_gt_elem_left++; 3498 3499 (void)qlnx_tx_int(ha, fp, txq); 3500 3501 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3502 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3503 QL_DPRINT1(ha, 3504 "(%d, 0x%x) insufficient BDs in chain[%d]\n", 3505 nsegs, elem_left, fp->rss_id); 3506 3507 fp->err_tx_nsegs_gt_elem_left++; 3508 fp->tx_ring_full = 1; 3509 if (ha->storm_stats_enable) 3510 ha->storm_stats_gather = 1; 3511 return (ENOBUFS); 3512 } 3513 } 3514 3515 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3516 3517 txq->sw_tx_ring[idx].mp = m_head; 3518 3519 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 3520 3521 memset(first_bd, 0, sizeof(*first_bd)); 3522 3523 first_bd->data.bd_flags.bitfields = 3524 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3525 3526
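/*
 * BD layout built below: the first BD maps the start of the frame and carries
 * the per-packet flag bits (IP/L4 checksum, VLAN insertion, LSO). For TSO
 * frames a second and third BD are always produced -- the third holds lso_mss
 * and the number of header BDs -- and the remaining DMA segments go into
 * plain data BDs. For non-TSO frames each additional segment gets its own
 * data BD and the packet length is recorded in the first BD.
 */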
BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3527 3528 nbd++; 3529 3530 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3531 first_bd->data.bd_flags.bitfields |= 3532 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3533 } 3534 3535 if (m_head->m_pkthdr.csum_flags & 3536 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3537 first_bd->data.bd_flags.bitfields |= 3538 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3539 } 3540 3541 if (m_head->m_flags & M_VLANTAG) { 3542 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3543 first_bd->data.bd_flags.bitfields |= 3544 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3545 } 3546 3547 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3548 first_bd->data.bd_flags.bitfields |= 3549 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3550 first_bd->data.bd_flags.bitfields |= 3551 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3552 3553 nbds_in_hdr = 1; 3554 3555 if (offset == segs->ds_len) { 3556 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3557 segs++; 3558 seg_idx++; 3559 3560 second_bd = (struct eth_tx_2nd_bd *) 3561 ecore_chain_produce(&txq->tx_pbl); 3562 memset(second_bd, 0, sizeof(*second_bd)); 3563 nbd++; 3564 3565 if (seg_idx < nsegs) { 3566 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3567 (segs->ds_addr), (segs->ds_len)); 3568 segs++; 3569 seg_idx++; 3570 } 3571 3572 third_bd = (struct eth_tx_3rd_bd *) 3573 ecore_chain_produce(&txq->tx_pbl); 3574 memset(third_bd, 0, sizeof(*third_bd)); 3575 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3576 third_bd->data.bitfields |= 3577 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3578 nbd++; 3579 3580 if (seg_idx < nsegs) { 3581 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3582 (segs->ds_addr), (segs->ds_len)); 3583 segs++; 3584 seg_idx++; 3585 } 3586 3587 for (; seg_idx < nsegs; seg_idx++) { 3588 tx_data_bd = (struct eth_tx_bd *) 3589 ecore_chain_produce(&txq->tx_pbl); 3590 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3591 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3592 segs->ds_addr,\ 3593 segs->ds_len); 3594 segs++; 3595 nbd++; 3596 } 3597 3598 } else if (offset < segs->ds_len) { 3599 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3600 3601 second_bd = (struct eth_tx_2nd_bd *) 3602 ecore_chain_produce(&txq->tx_pbl); 3603 memset(second_bd, 0, sizeof(*second_bd)); 3604 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3605 (segs->ds_addr + offset),\ 3606 (segs->ds_len - offset)); 3607 nbd++; 3608 segs++; 3609 3610 third_bd = (struct eth_tx_3rd_bd *) 3611 ecore_chain_produce(&txq->tx_pbl); 3612 memset(third_bd, 0, sizeof(*third_bd)); 3613 3614 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3615 segs->ds_addr,\ 3616 segs->ds_len); 3617 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3618 third_bd->data.bitfields |= 3619 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3620 segs++; 3621 nbd++; 3622 3623 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3624 tx_data_bd = (struct eth_tx_bd *) 3625 ecore_chain_produce(&txq->tx_pbl); 3626 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3627 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3628 segs->ds_addr,\ 3629 segs->ds_len); 3630 segs++; 3631 nbd++; 3632 } 3633 3634 } else { 3635 offset = offset - segs->ds_len; 3636 segs++; 3637 3638 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3639 if (offset) 3640 nbds_in_hdr++; 3641 3642 tx_data_bd = (struct eth_tx_bd *) 3643 ecore_chain_produce(&txq->tx_pbl); 3644 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3645 3646 if (second_bd == NULL) { 3647 second_bd = (struct eth_tx_2nd_bd *) 3648 tx_data_bd; 3649 } else if (third_bd == NULL) { 3650 third_bd = (struct 
eth_tx_3rd_bd *) 3651 tx_data_bd; 3652 } 3653 3654 if (offset && (offset < segs->ds_len)) { 3655 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3656 segs->ds_addr, offset); 3657 3658 tx_data_bd = (struct eth_tx_bd *) 3659 ecore_chain_produce(&txq->tx_pbl); 3660 3661 memset(tx_data_bd, 0, 3662 sizeof(*tx_data_bd)); 3663 3664 if (second_bd == NULL) { 3665 second_bd = 3666 (struct eth_tx_2nd_bd *)tx_data_bd; 3667 } else if (third_bd == NULL) { 3668 third_bd = 3669 (struct eth_tx_3rd_bd *)tx_data_bd; 3670 } 3671 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3672 (segs->ds_addr + offset), \ 3673 (segs->ds_len - offset)); 3674 nbd++; 3675 offset = 0; 3676 } else { 3677 if (offset) 3678 offset = offset - segs->ds_len; 3679 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3680 segs->ds_addr, segs->ds_len); 3681 } 3682 segs++; 3683 nbd++; 3684 } 3685 3686 if (third_bd == NULL) { 3687 third_bd = (struct eth_tx_3rd_bd *) 3688 ecore_chain_produce(&txq->tx_pbl); 3689 memset(third_bd, 0, sizeof(*third_bd)); 3690 } 3691 3692 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3693 third_bd->data.bitfields |= 3694 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3695 } 3696 fp->tx_tso_pkts++; 3697 } else { 3698 segs++; 3699 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3700 tx_data_bd = (struct eth_tx_bd *) 3701 ecore_chain_produce(&txq->tx_pbl); 3702 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3703 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3704 segs->ds_len); 3705 segs++; 3706 nbd++; 3707 } 3708 first_bd->data.bitfields = 3709 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3710 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3711 first_bd->data.bitfields = 3712 htole16(first_bd->data.bitfields); 3713 fp->tx_non_tso_pkts++; 3714 } 3715 3716 first_bd->data.nbds = nbd; 3717 3718 if (ha->dbg_trace_tso_pkt_len) { 3719 if (fp->tx_tso_max_nsegs < nsegs) 3720 fp->tx_tso_max_nsegs = nsegs; 3721 3722 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3723 fp->tx_tso_min_nsegs = nsegs; 3724 } 3725 3726 txq->sw_tx_ring[idx].nsegs = nsegs; 3727 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3728 3729 txq->tx_db.data.bd_prod = 3730 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3731 3732 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3733 3734 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3735 return (0); 3736 } 3737 3738 static void 3739 qlnx_stop(qlnx_host_t *ha) 3740 { 3741 if_t ifp = ha->ifp; 3742 int i; 3743 3744 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 3745 3746 /* 3747 * We simply lock and unlock each fp->tx_mtx to 3748 * propagate the if_drv_flags 3749 * state to each tx thread 3750 */ 3751 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3752 3753 if (ha->state == QLNX_STATE_OPEN) { 3754 for (i = 0; i < ha->num_rss; i++) { 3755 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3756 3757 mtx_lock(&fp->tx_mtx); 3758 mtx_unlock(&fp->tx_mtx); 3759 3760 if (fp->fp_taskqueue != NULL) 3761 taskqueue_enqueue(fp->fp_taskqueue, 3762 &fp->fp_task); 3763 } 3764 } 3765 #ifdef QLNX_ENABLE_IWARP 3766 if (qlnx_vf_device(ha) != 0) { 3767 qlnx_rdma_dev_close(ha); 3768 } 3769 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3770 3771 qlnx_unload(ha); 3772 3773 return; 3774 } 3775 3776 static int 3777 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3778 { 3779 return(TX_RING_SIZE - 1); 3780 } 3781 3782 uint8_t * 3783 qlnx_get_mac_addr(qlnx_host_t *ha) 3784 { 3785 struct ecore_hwfn *p_hwfn; 3786 unsigned char mac[ETHER_ADDR_LEN]; 3787 uint8_t p_is_forced; 3788 3789 p_hwfn = &ha->cdev.hwfns[0]; 3790 3791 if 
(qlnx_vf_device(ha) != 0) 3792 return (p_hwfn->hw_info.hw_mac_addr); 3793 3794 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3795 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3796 true) { 3797 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3798 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3799 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3800 memcpy(ha->primary_mac, mac, ETH_ALEN); 3801 } 3802 3803 return (ha->primary_mac); 3804 } 3805 3806 static uint32_t 3807 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3808 { 3809 uint32_t ifm_type = 0; 3810 3811 switch (if_link->media_type) { 3812 case MEDIA_MODULE_FIBER: 3813 case MEDIA_UNSPECIFIED: 3814 if (if_link->speed == (100 * 1000)) 3815 ifm_type = QLNX_IFM_100G_SR4; 3816 else if (if_link->speed == (40 * 1000)) 3817 ifm_type = IFM_40G_SR4; 3818 else if (if_link->speed == (25 * 1000)) 3819 ifm_type = QLNX_IFM_25G_SR; 3820 else if (if_link->speed == (10 * 1000)) 3821 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3822 else if (if_link->speed == (1 * 1000)) 3823 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3824 3825 break; 3826 3827 case MEDIA_DA_TWINAX: 3828 if (if_link->speed == (100 * 1000)) 3829 ifm_type = QLNX_IFM_100G_CR4; 3830 else if (if_link->speed == (40 * 1000)) 3831 ifm_type = IFM_40G_CR4; 3832 else if (if_link->speed == (25 * 1000)) 3833 ifm_type = QLNX_IFM_25G_CR; 3834 else if (if_link->speed == (10 * 1000)) 3835 ifm_type = IFM_10G_TWINAX; 3836 3837 break; 3838 3839 default : 3840 ifm_type = IFM_UNKNOWN; 3841 break; 3842 } 3843 return (ifm_type); 3844 } 3845 3846 /***************************************************************************** 3847 * Interrupt Service Functions 3848 *****************************************************************************/ 3849 3850 static int 3851 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3852 struct mbuf *mp_head, uint16_t len) 3853 { 3854 struct mbuf *mp, *mpf, *mpl; 3855 struct sw_rx_data *sw_rx_data; 3856 struct qlnx_rx_queue *rxq; 3857 uint16_t len_in_buffer; 3858 3859 rxq = fp->rxq; 3860 mpf = mpl = mp = NULL; 3861 3862 while (len) { 3863 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3864 3865 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3866 mp = sw_rx_data->data; 3867 3868 if (mp == NULL) { 3869 QL_DPRINT1(ha, "mp = NULL\n"); 3870 fp->err_rx_mp_null++; 3871 rxq->sw_rx_cons = 3872 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3873 3874 if (mpf != NULL) 3875 m_freem(mpf); 3876 3877 return (-1); 3878 } 3879 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3880 BUS_DMASYNC_POSTREAD); 3881 3882 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3883 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3884 " incoming packet and reusing its buffer\n"); 3885 3886 qlnx_reuse_rx_data(rxq); 3887 fp->err_rx_alloc_errors++; 3888 3889 if (mpf != NULL) 3890 m_freem(mpf); 3891 3892 return (-1); 3893 } 3894 ecore_chain_consume(&rxq->rx_bd_ring); 3895 3896 if (len > rxq->rx_buf_size) 3897 len_in_buffer = rxq->rx_buf_size; 3898 else 3899 len_in_buffer = len; 3900 3901 len = len - len_in_buffer; 3902 3903 mp->m_flags &= ~M_PKTHDR; 3904 mp->m_next = NULL; 3905 mp->m_len = len_in_buffer; 3906 3907 if (mpf == NULL) 3908 mpf = mpl = mp; 3909 else { 3910 mpl->m_next = mp; 3911 mpl = mp; 3912 } 3913 } 3914 3915 if (mpf != NULL) 3916 mp_head->m_next = mpf; 3917 3918 return (0); 3919 } 3920 3921 static void 3922 qlnx_tpa_start(qlnx_host_t *ha, 3923 struct qlnx_fastpath *fp, 3924 struct qlnx_rx_queue *rxq, 3925 struct 
eth_fast_path_rx_tpa_start_cqe *cqe) 3926 { 3927 uint32_t agg_index; 3928 if_t ifp = ha->ifp; 3929 struct mbuf *mp; 3930 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3931 struct sw_rx_data *sw_rx_data; 3932 dma_addr_t addr; 3933 bus_dmamap_t map; 3934 struct eth_rx_bd *rx_bd; 3935 int i; 3936 uint8_t hash_type; 3937 3938 agg_index = cqe->tpa_agg_index; 3939 3940 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3941 \t type = 0x%x\n \ 3942 \t bitfields = 0x%x\n \ 3943 \t seg_len = 0x%x\n \ 3944 \t pars_flags = 0x%x\n \ 3945 \t vlan_tag = 0x%x\n \ 3946 \t rss_hash = 0x%x\n \ 3947 \t len_on_first_bd = 0x%x\n \ 3948 \t placement_offset = 0x%x\n \ 3949 \t tpa_agg_index = 0x%x\n \ 3950 \t header_len = 0x%x\n \ 3951 \t ext_bd_len_list[0] = 0x%x\n \ 3952 \t ext_bd_len_list[1] = 0x%x\n \ 3953 \t ext_bd_len_list[2] = 0x%x\n \ 3954 \t ext_bd_len_list[3] = 0x%x\n \ 3955 \t ext_bd_len_list[4] = 0x%x\n", 3956 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3957 cqe->pars_flags.flags, cqe->vlan_tag, 3958 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3959 cqe->tpa_agg_index, cqe->header_len, 3960 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3961 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3962 cqe->ext_bd_len_list[4]); 3963 3964 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3965 fp->err_rx_tpa_invalid_agg_num++; 3966 return; 3967 } 3968 3969 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3970 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3971 mp = sw_rx_data->data; 3972 3973 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 3974 3975 if (mp == NULL) { 3976 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 3977 fp->err_rx_mp_null++; 3978 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3979 3980 return; 3981 } 3982 3983 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3984 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 3985 " flags = %x, dropping incoming packet\n", fp->rss_id, 3986 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 3987 3988 fp->err_rx_hw_errors++; 3989 3990 qlnx_reuse_rx_data(rxq); 3991 3992 QLNX_INC_IERRORS(ifp); 3993 3994 return; 3995 } 3996 3997 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3998 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 3999 " dropping incoming packet and reusing its buffer\n", 4000 fp->rss_id); 4001 4002 fp->err_rx_alloc_errors++; 4003 QLNX_INC_IQDROPS(ifp); 4004 4005 /* 4006 * Load the tpa mbuf into the rx ring and save the 4007 * posted mbuf 4008 */ 4009 4010 map = sw_rx_data->map; 4011 addr = sw_rx_data->dma_addr; 4012 4013 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4014 4015 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4016 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4017 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4018 4019 rxq->tpa_info[agg_index].rx_buf.data = mp; 4020 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4021 rxq->tpa_info[agg_index].rx_buf.map = map; 4022 4023 rx_bd = (struct eth_rx_bd *) 4024 ecore_chain_produce(&rxq->rx_bd_ring); 4025 4026 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4027 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4028 4029 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4030 BUS_DMASYNC_PREREAD); 4031 4032 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4033 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4034 4035 ecore_chain_consume(&rxq->rx_bd_ring); 4036 4037 /* Now reuse any buffers posted in ext_bd_len_list */ 4038 for (i = 0; i < 
ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4039 if (cqe->ext_bd_len_list[i] == 0) 4040 break; 4041 4042 qlnx_reuse_rx_data(rxq); 4043 } 4044 4045 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4046 return; 4047 } 4048 4049 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4050 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4051 " dropping incoming packet and reusing its buffer\n", 4052 fp->rss_id); 4053 4054 QLNX_INC_IQDROPS(ifp); 4055 4056 /* if we already have mbuf head in aggregation free it */ 4057 if (rxq->tpa_info[agg_index].mpf) { 4058 m_freem(rxq->tpa_info[agg_index].mpf); 4059 rxq->tpa_info[agg_index].mpl = NULL; 4060 } 4061 rxq->tpa_info[agg_index].mpf = mp; 4062 rxq->tpa_info[agg_index].mpl = NULL; 4063 4064 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4065 ecore_chain_consume(&rxq->rx_bd_ring); 4066 4067 /* Now reuse any buffers posted in ext_bd_len_list */ 4068 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4069 if (cqe->ext_bd_len_list[i] == 0) 4070 break; 4071 4072 qlnx_reuse_rx_data(rxq); 4073 } 4074 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4075 4076 return; 4077 } 4078 4079 /* 4080 * first process the ext_bd_len_list 4081 * if this fails then we simply drop the packet 4082 */ 4083 ecore_chain_consume(&rxq->rx_bd_ring); 4084 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4085 4086 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4087 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4088 4089 if (cqe->ext_bd_len_list[i] == 0) 4090 break; 4091 4092 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4093 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4094 BUS_DMASYNC_POSTREAD); 4095 4096 mpc = sw_rx_data->data; 4097 4098 if (mpc == NULL) { 4099 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4100 fp->err_rx_mp_null++; 4101 if (mpf != NULL) 4102 m_freem(mpf); 4103 mpf = mpl = NULL; 4104 rxq->tpa_info[agg_index].agg_state = 4105 QLNX_AGG_STATE_ERROR; 4106 ecore_chain_consume(&rxq->rx_bd_ring); 4107 rxq->sw_rx_cons = 4108 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4109 continue; 4110 } 4111 4112 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4113 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4114 " dropping incoming packet and reusing its" 4115 " buffer\n", fp->rss_id); 4116 4117 qlnx_reuse_rx_data(rxq); 4118 4119 if (mpf != NULL) 4120 m_freem(mpf); 4121 mpf = mpl = NULL; 4122 4123 rxq->tpa_info[agg_index].agg_state = 4124 QLNX_AGG_STATE_ERROR; 4125 4126 ecore_chain_consume(&rxq->rx_bd_ring); 4127 rxq->sw_rx_cons = 4128 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4129 4130 continue; 4131 } 4132 4133 mpc->m_flags &= ~M_PKTHDR; 4134 mpc->m_next = NULL; 4135 mpc->m_len = cqe->ext_bd_len_list[i]; 4136 4137 if (mpf == NULL) { 4138 mpf = mpl = mpc; 4139 } else { 4140 mpl->m_len = ha->rx_buf_size; 4141 mpl->m_next = mpc; 4142 mpl = mpc; 4143 } 4144 4145 ecore_chain_consume(&rxq->rx_bd_ring); 4146 rxq->sw_rx_cons = 4147 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4148 } 4149 4150 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4151 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4152 " incoming packet and reusing its buffer\n", 4153 fp->rss_id); 4154 4155 QLNX_INC_IQDROPS(ifp); 4156 4157 rxq->tpa_info[agg_index].mpf = mp; 4158 rxq->tpa_info[agg_index].mpl = NULL; 4159 4160 return; 4161 } 4162 4163 rxq->tpa_info[agg_index].placement_offset = cqe->placement_offset; 4164 4165 if (mpf != NULL) { 4166 mp->m_len = ha->rx_buf_size; 4167 mp->m_next = mpf; 4168 
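/*
 * Save the head and tail of the aggregation chain in tpa_info[] so that
 * later TPA continuation/end CQE processing can keep extending this chain.
 */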
rxq->tpa_info[agg_index].mpf = mp; 4169 rxq->tpa_info[agg_index].mpl = mpl; 4170 } else { 4171 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4172 rxq->tpa_info[agg_index].mpf = mp; 4173 rxq->tpa_info[agg_index].mpl = mp; 4174 mp->m_next = NULL; 4175 } 4176 4177 mp->m_flags |= M_PKTHDR; 4178 4179 /* assign packet to this interface */ 4180 mp->m_pkthdr.rcvif = ifp; 4181 4182 /* assume no hardware checksum has completed */ 4183 mp->m_pkthdr.csum_flags = 0; 4184 4185 //mp->m_pkthdr.flowid = fp->rss_id; 4186 mp->m_pkthdr.flowid = cqe->rss_hash; 4187 4188 hash_type = cqe->bitfields & 4189 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4190 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4191 4192 switch (hash_type) { 4193 case RSS_HASH_TYPE_IPV4: 4194 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4195 break; 4196 4197 case RSS_HASH_TYPE_TCP_IPV4: 4198 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4199 break; 4200 4201 case RSS_HASH_TYPE_IPV6: 4202 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4203 break; 4204 4205 case RSS_HASH_TYPE_TCP_IPV6: 4206 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4207 break; 4208 4209 default: 4210 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4211 break; 4212 } 4213 4214 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4215 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4216 4217 mp->m_pkthdr.csum_data = 0xFFFF; 4218 4219 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4220 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4221 mp->m_flags |= M_VLANTAG; 4222 } 4223 4224 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4225 4226 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4227 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4228 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4229 4230 return; 4231 } 4232 4233 static void 4234 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4235 struct qlnx_rx_queue *rxq, 4236 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4237 { 4238 struct sw_rx_data *sw_rx_data; 4239 int i; 4240 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4241 struct mbuf *mp; 4242 uint32_t agg_index; 4243 4244 QL_DPRINT7(ha, "[%d]: enter\n \ 4245 \t type = 0x%x\n \ 4246 \t tpa_agg_index = 0x%x\n \ 4247 \t len_list[0] = 0x%x\n \ 4248 \t len_list[1] = 0x%x\n \ 4249 \t len_list[2] = 0x%x\n \ 4250 \t len_list[3] = 0x%x\n \ 4251 \t len_list[4] = 0x%x\n \ 4252 \t len_list[5] = 0x%x\n", 4253 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4254 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4255 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4256 4257 agg_index = cqe->tpa_agg_index; 4258 4259 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4260 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4261 fp->err_rx_tpa_invalid_agg_num++; 4262 return; 4263 } 4264 4265 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4266 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4267 4268 if (cqe->len_list[i] == 0) 4269 break; 4270 4271 if (rxq->tpa_info[agg_index].agg_state != 4272 QLNX_AGG_STATE_START) { 4273 qlnx_reuse_rx_data(rxq); 4274 continue; 4275 } 4276 4277 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4278 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4279 BUS_DMASYNC_POSTREAD); 4280 4281 mpc = sw_rx_data->data; 4282 4283 if (mpc == NULL) { 4284 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4285 4286 fp->err_rx_mp_null++; 4287 if (mpf != NULL) 4288 m_freem(mpf); 4289 mpf = mpl = NULL; 4290 rxq->tpa_info[agg_index].agg_state = 4291 QLNX_AGG_STATE_ERROR; 4292 ecore_chain_consume(&rxq->rx_bd_ring); 4293 rxq->sw_rx_cons =
4294 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4295 continue; 4296 } 4297 4298 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4299 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4300 " dropping incoming packet and reusing its" 4301 " buffer\n", fp->rss_id); 4302 4303 qlnx_reuse_rx_data(rxq); 4304 4305 if (mpf != NULL) 4306 m_freem(mpf); 4307 mpf = mpl = NULL; 4308 4309 rxq->tpa_info[agg_index].agg_state = 4310 QLNX_AGG_STATE_ERROR; 4311 4312 ecore_chain_consume(&rxq->rx_bd_ring); 4313 rxq->sw_rx_cons = 4314 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4315 4316 continue; 4317 } 4318 4319 mpc->m_flags &= ~M_PKTHDR; 4320 mpc->m_next = NULL; 4321 mpc->m_len = cqe->len_list[i]; 4322 4323 if (mpf == NULL) { 4324 mpf = mpl = mpc; 4325 } else { 4326 mpl->m_len = ha->rx_buf_size; 4327 mpl->m_next = mpc; 4328 mpl = mpc; 4329 } 4330 4331 ecore_chain_consume(&rxq->rx_bd_ring); 4332 rxq->sw_rx_cons = 4333 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4334 } 4335 4336 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4337 fp->rss_id, mpf, mpl); 4338 4339 if (mpf != NULL) { 4340 mp = rxq->tpa_info[agg_index].mpl; 4341 mp->m_len = ha->rx_buf_size; 4342 mp->m_next = mpf; 4343 rxq->tpa_info[agg_index].mpl = mpl; 4344 } 4345 4346 return; 4347 } 4348 4349 static int 4350 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4351 struct qlnx_rx_queue *rxq, 4352 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4353 { 4354 struct sw_rx_data *sw_rx_data; 4355 int i; 4356 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4357 struct mbuf *mp; 4358 uint32_t agg_index; 4359 uint32_t len = 0; 4360 if_t ifp = ha->ifp; 4361 4362 QL_DPRINT7(ha, "[%d]: enter\n \ 4363 \t type = 0x%x\n \ 4364 \t tpa_agg_index = 0x%x\n \ 4365 \t total_packet_len = 0x%x\n \ 4366 \t num_of_bds = 0x%x\n \ 4367 \t end_reason = 0x%x\n \ 4368 \t num_of_coalesced_segs = 0x%x\n \ 4369 \t ts_delta = 0x%x\n \ 4370 \t len_list[0] = 0x%x\n \ 4371 \t len_list[1] = 0x%x\n \ 4372 \t len_list[2] = 0x%x\n \ 4373 \t len_list[3] = 0x%x\n", 4374 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4375 cqe->total_packet_len, cqe->num_of_bds, 4376 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4377 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4378 cqe->len_list[3]); 4379 4380 agg_index = cqe->tpa_agg_index; 4381 4382 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4383 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4384 4385 fp->err_rx_tpa_invalid_agg_num++; 4386 return (0); 4387 } 4388 4389 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4390 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4391 4392 if (cqe->len_list[i] == 0) 4393 break; 4394 4395 if (rxq->tpa_info[agg_index].agg_state != 4396 QLNX_AGG_STATE_START) { 4397 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4398 4399 qlnx_reuse_rx_data(rxq); 4400 continue; 4401 } 4402 4403 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4404 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4405 BUS_DMASYNC_POSTREAD); 4406 4407 mpc = sw_rx_data->data; 4408 4409 if (mpc == NULL) { 4410 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4411 4412 fp->err_rx_mp_null++; 4413 if (mpf != NULL) 4414 m_freem(mpf); 4415 mpf = mpl = NULL; 4416 rxq->tpa_info[agg_index].agg_state = 4417 QLNX_AGG_STATE_ERROR; 4418 ecore_chain_consume(&rxq->rx_bd_ring); 4419 rxq->sw_rx_cons = 4420 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4421 continue; 4422 } 4423 4424 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4425 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4426 " dropping incoming packet and reusing its" 4427 " buffer\n", fp->rss_id); 4428 4429 
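/* Replenishment failed: recycle the consumed BD back to the producer, free any partial chain built so far and mark the aggregation as errored so the assembled packet is dropped below. */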
qlnx_reuse_rx_data(rxq); 4430 4431 if (mpf != NULL) 4432 m_freem(mpf); 4433 mpf = mpl = NULL; 4434 4435 rxq->tpa_info[agg_index].agg_state = 4436 QLNX_AGG_STATE_ERROR; 4437 4438 ecore_chain_consume(&rxq->rx_bd_ring); 4439 rxq->sw_rx_cons = 4440 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4441 4442 continue; 4443 } 4444 4445 mpc->m_flags &= ~M_PKTHDR; 4446 mpc->m_next = NULL; 4447 mpc->m_len = cqe->len_list[i]; 4448 4449 if (mpf == NULL) { 4450 mpf = mpl = mpc; 4451 } else { 4452 mpl->m_len = ha->rx_buf_size; 4453 mpl->m_next = mpc; 4454 mpl = mpc; 4455 } 4456 4457 ecore_chain_consume(&rxq->rx_bd_ring); 4458 rxq->sw_rx_cons = 4459 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4460 } 4461 4462 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4463 4464 if (mpf != NULL) { 4465 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4466 4467 mp = rxq->tpa_info[agg_index].mpl; 4468 mp->m_len = ha->rx_buf_size; 4469 mp->m_next = mpf; 4470 } 4471 4472 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4473 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4474 4475 if (rxq->tpa_info[agg_index].mpf != NULL) 4476 m_freem(rxq->tpa_info[agg_index].mpf); 4477 rxq->tpa_info[agg_index].mpf = NULL; 4478 rxq->tpa_info[agg_index].mpl = NULL; 4479 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4480 return (0); 4481 } 4482 4483 mp = rxq->tpa_info[agg_index].mpf; 4484 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4485 mp->m_pkthdr.len = cqe->total_packet_len; 4486 4487 if (mp->m_next == NULL) 4488 mp->m_len = mp->m_pkthdr.len; 4489 else { 4490 /* compute the total packet length */ 4491 mpf = mp; 4492 while (mpf != NULL) { 4493 len += mpf->m_len; 4494 mpf = mpf->m_next; 4495 } 4496 4497 if (cqe->total_packet_len > len) { 4498 mpl = rxq->tpa_info[agg_index].mpl; 4499 mpl->m_len += (cqe->total_packet_len - len); 4500 } 4501 } 4502 4503 QLNX_INC_IPACKETS(ifp); 4504 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4505 4506 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4507 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4508 fp->rss_id, mp->m_pkthdr.csum_data, 4509 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4510 4511 if_input(ifp, mp); 4512 4513 rxq->tpa_info[agg_index].mpf = NULL; 4514 rxq->tpa_info[agg_index].mpl = NULL; 4515 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4516 4517 return (cqe->num_of_coalesced_segs); 4518 } 4519 4520 static int 4521 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4522 int lro_enable) 4523 { 4524 uint16_t hw_comp_cons, sw_comp_cons; 4525 int rx_pkt = 0; 4526 struct qlnx_rx_queue *rxq = fp->rxq; 4527 if_t ifp = ha->ifp; 4528 struct ecore_dev *cdev = &ha->cdev; 4529 struct ecore_hwfn *p_hwfn; 4530 4531 #ifdef QLNX_SOFT_LRO 4532 struct lro_ctrl *lro; 4533 4534 lro = &rxq->lro; 4535 #endif /* #ifdef QLNX_SOFT_LRO */ 4536 4537 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4538 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4539 4540 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4541 4542 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4543 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4544 * read before it is written by FW, then FW writes CQE and SB, and then 4545 * the CPU reads the hw_comp_cons, it will use an old CQE. 
4546 */ 4547 4548 /* Loop to complete all indicated BDs */ 4549 while (sw_comp_cons != hw_comp_cons) { 4550 union eth_rx_cqe *cqe; 4551 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4552 struct sw_rx_data *sw_rx_data; 4553 register struct mbuf *mp; 4554 enum eth_rx_cqe_type cqe_type; 4555 uint16_t len, pad, len_on_first_bd; 4556 uint8_t *data; 4557 uint8_t hash_type; 4558 4559 /* Get the CQE from the completion ring */ 4560 cqe = (union eth_rx_cqe *) 4561 ecore_chain_consume(&rxq->rx_comp_ring); 4562 cqe_type = cqe->fast_path_regular.type; 4563 4564 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4565 QL_DPRINT3(ha, "Got a slowath CQE\n"); 4566 4567 ecore_eth_cqe_completion(p_hwfn, 4568 (struct eth_slow_path_rx_cqe *)cqe); 4569 goto next_cqe; 4570 } 4571 4572 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4573 switch (cqe_type) { 4574 case ETH_RX_CQE_TYPE_TPA_START: 4575 qlnx_tpa_start(ha, fp, rxq, 4576 &cqe->fast_path_tpa_start); 4577 fp->tpa_start++; 4578 break; 4579 4580 case ETH_RX_CQE_TYPE_TPA_CONT: 4581 qlnx_tpa_cont(ha, fp, rxq, 4582 &cqe->fast_path_tpa_cont); 4583 fp->tpa_cont++; 4584 break; 4585 4586 case ETH_RX_CQE_TYPE_TPA_END: 4587 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4588 &cqe->fast_path_tpa_end); 4589 fp->tpa_end++; 4590 break; 4591 4592 default: 4593 break; 4594 } 4595 4596 goto next_cqe; 4597 } 4598 4599 /* Get the data from the SW ring */ 4600 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4601 mp = sw_rx_data->data; 4602 4603 if (mp == NULL) { 4604 QL_DPRINT1(ha, "mp = NULL\n"); 4605 fp->err_rx_mp_null++; 4606 rxq->sw_rx_cons = 4607 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4608 goto next_cqe; 4609 } 4610 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4611 BUS_DMASYNC_POSTREAD); 4612 4613 /* non GRO */ 4614 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4615 len = le16toh(fp_cqe->pkt_len); 4616 pad = fp_cqe->placement_offset; 4617 #if 0 4618 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4619 " len %u, parsing flags = %d pad = %d\n", 4620 cqe_type, fp_cqe->bitfields, 4621 le16toh(fp_cqe->vlan_tag), 4622 len, le16toh(fp_cqe->pars_flags.flags), pad); 4623 #endif 4624 data = mtod(mp, uint8_t *); 4625 data = data + pad; 4626 4627 if (0) 4628 qlnx_dump_buf8(ha, __func__, data, len); 4629 4630 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4631 * is always with a fixed size. If allocation fails, we take the 4632 * consumed BD and return it to the ring in the PROD position. 4633 * The packet that was received on that BD will be dropped (and 4634 * not passed to the upper stack). 
*/ 4635 4636 /* If this is an error packet then drop it */ 4637 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4638 CQE_FLAGS_ERR) { 4639 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4640 " dropping incoming packet\n", sw_comp_cons, 4641 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4642 fp->err_rx_hw_errors++; 4643 4644 qlnx_reuse_rx_data(rxq); 4645 4646 QLNX_INC_IERRORS(ifp); 4647 4648 goto next_cqe; 4649 } 4650 4651 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4652 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4653 " incoming packet and reusing its buffer\n"); 4654 qlnx_reuse_rx_data(rxq); 4655 4656 fp->err_rx_alloc_errors++; 4657 4658 QLNX_INC_IQDROPS(ifp); 4659 4660 goto next_cqe; 4661 } 4662 4663 ecore_chain_consume(&rxq->rx_bd_ring); 4664 4665 len_on_first_bd = fp_cqe->len_on_first_bd; 4666 m_adj(mp, pad); 4667 mp->m_pkthdr.len = len; 4668 4669 if ((len > 60 ) && (len > len_on_first_bd)) { 4670 mp->m_len = len_on_first_bd; 4671 4672 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4673 (len - len_on_first_bd)) != 0) { 4674 m_freem(mp); 4675 4676 QLNX_INC_IQDROPS(ifp); 4677 4678 goto next_cqe; 4679 } 4680 4681 } else if (len_on_first_bd < len) { 4682 fp->err_rx_jumbo_chain_pkts++; 4683 } else { 4684 mp->m_len = len; 4685 } 4686 4687 mp->m_flags |= M_PKTHDR; 4688 4689 /* assign packet to this interface */ 4690 mp->m_pkthdr.rcvif = ifp; 4691 4692 /* assume no hardware checksum has completed */ 4693 mp->m_pkthdr.csum_flags = 0; 4694 4695 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4696 4697 hash_type = fp_cqe->bitfields & 4698 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4699 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4700 4701 switch (hash_type) { 4702 case RSS_HASH_TYPE_IPV4: 4703 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4704 break; 4705 4706 case RSS_HASH_TYPE_TCP_IPV4: 4707 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4708 break; 4709 4710 case RSS_HASH_TYPE_IPV6: 4711 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4712 break; 4713 4714 case RSS_HASH_TYPE_TCP_IPV6: 4715 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4716 break; 4717 4718 default: 4719 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4720 break; 4721 } 4722 4723 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4724 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4725 } 4726 4727 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4728 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4729 } 4730 4731 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4732 mp->m_pkthdr.csum_data = 0xFFFF; 4733 mp->m_pkthdr.csum_flags |= 4734 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4735 } 4736 4737 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4738 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4739 mp->m_flags |= M_VLANTAG; 4740 } 4741 4742 QLNX_INC_IPACKETS(ifp); 4743 QLNX_INC_IBYTES(ifp, len); 4744 4745 #ifdef QLNX_SOFT_LRO 4746 if (lro_enable) 4747 tcp_lro_queue_mbuf(lro, mp); 4748 else 4749 if_input(ifp, mp); 4750 #else 4751 4752 if_input(ifp, mp); 4753 4754 #endif /* #ifdef QLNX_SOFT_LRO */ 4755 4756 rx_pkt++; 4757 4758 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4759 4760 next_cqe: /* don't consume bd rx buffer */ 4761 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4762 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4763 4764 /* CR TPA - revisit how to handle budget in TPA perhaps 4765 increase on "end" */ 4766 if (rx_pkt == budget) 4767 break; 4768 } /* repeat while sw_comp_cons != hw_comp_cons...
*/ 4769 4770 /* Update producers */ 4771 qlnx_update_rx_prod(p_hwfn, rxq); 4772 4773 return rx_pkt; 4774 } 4775 4776 /* 4777 * fast path interrupt 4778 */ 4779 4780 static void 4781 qlnx_fp_isr(void *arg) 4782 { 4783 qlnx_ivec_t *ivec = arg; 4784 qlnx_host_t *ha; 4785 struct qlnx_fastpath *fp = NULL; 4786 int idx; 4787 4788 ha = ivec->ha; 4789 4790 if (ha->state != QLNX_STATE_OPEN) { 4791 return; 4792 } 4793 4794 idx = ivec->rss_idx; 4795 4796 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4797 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4798 ha->err_illegal_intr++; 4799 return; 4800 } 4801 fp = &ha->fp_array[idx]; 4802 4803 if (fp == NULL) { 4804 ha->err_fp_null++; 4805 } else { 4806 int rx_int = 0; 4807 #ifdef QLNX_SOFT_LRO 4808 int total_rx_count = 0; 4809 #endif 4810 int lro_enable, tc; 4811 struct qlnx_tx_queue *txq; 4812 uint16_t elem_left; 4813 4814 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO; 4815 4816 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4817 4818 do { 4819 for (tc = 0; tc < ha->num_tc; tc++) { 4820 txq = fp->txq[tc]; 4821 4822 if((int)(elem_left = 4823 ecore_chain_get_elem_left(&txq->tx_pbl)) < 4824 QLNX_TX_ELEM_THRESH) { 4825 if (mtx_trylock(&fp->tx_mtx)) { 4826 #ifdef QLNX_TRACE_PERF_DATA 4827 tx_compl = fp->tx_pkts_completed; 4828 #endif 4829 4830 qlnx_tx_int(ha, fp, fp->txq[tc]); 4831 #ifdef QLNX_TRACE_PERF_DATA 4832 fp->tx_pkts_compl_intr += 4833 (fp->tx_pkts_completed - tx_compl); 4834 if ((fp->tx_pkts_completed - tx_compl) <= 32) 4835 fp->tx_comInt[0]++; 4836 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 4837 ((fp->tx_pkts_completed - tx_compl) <= 64)) 4838 fp->tx_comInt[1]++; 4839 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 4840 ((fp->tx_pkts_completed - tx_compl) <= 128)) 4841 fp->tx_comInt[2]++; 4842 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 4843 fp->tx_comInt[3]++; 4844 #endif 4845 mtx_unlock(&fp->tx_mtx); 4846 } 4847 } 4848 } 4849 4850 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4851 lro_enable); 4852 4853 if (rx_int) { 4854 fp->rx_pkts += rx_int; 4855 #ifdef QLNX_SOFT_LRO 4856 total_rx_count += rx_int; 4857 #endif 4858 } 4859 4860 } while (rx_int); 4861 4862 #ifdef QLNX_SOFT_LRO 4863 { 4864 struct lro_ctrl *lro; 4865 4866 lro = &fp->rxq->lro; 4867 4868 if (lro_enable && total_rx_count) { 4869 4870 #ifdef QLNX_TRACE_LRO_CNT 4871 if (lro->lro_mbuf_count & ~1023) 4872 fp->lro_cnt_1024++; 4873 else if (lro->lro_mbuf_count & ~511) 4874 fp->lro_cnt_512++; 4875 else if (lro->lro_mbuf_count & ~255) 4876 fp->lro_cnt_256++; 4877 else if (lro->lro_mbuf_count & ~127) 4878 fp->lro_cnt_128++; 4879 else if (lro->lro_mbuf_count & ~63) 4880 fp->lro_cnt_64++; 4881 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4882 4883 tcp_lro_flush_all(lro); 4884 } 4885 } 4886 #endif /* #ifdef QLNX_SOFT_LRO */ 4887 4888 ecore_sb_update_sb_idx(fp->sb_info); 4889 rmb(); 4890 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4891 } 4892 4893 return; 4894 } 4895 4896 /* 4897 * slow path interrupt processing function 4898 * can be invoked in polled mode or in interrupt mode via taskqueue. 
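 * It counts the interrupt and hands the event to ecore_int_sp_dpc() for the owning hardware function.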
4899 */ 4900 void 4901 qlnx_sp_isr(void *arg) 4902 { 4903 struct ecore_hwfn *p_hwfn; 4904 qlnx_host_t *ha; 4905 4906 p_hwfn = arg; 4907 4908 ha = (qlnx_host_t *)p_hwfn->p_dev; 4909 4910 ha->sp_interrupts++; 4911 4912 QL_DPRINT2(ha, "enter\n"); 4913 4914 ecore_int_sp_dpc(p_hwfn); 4915 4916 QL_DPRINT2(ha, "exit\n"); 4917 4918 return; 4919 } 4920 4921 /***************************************************************************** 4922 * Support Functions for DMA'able Memory 4923 *****************************************************************************/ 4924 4925 static void 4926 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4927 { 4928 *((bus_addr_t *)arg) = 0; 4929 4930 if (error) { 4931 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4932 return; 4933 } 4934 4935 *((bus_addr_t *)arg) = segs[0].ds_addr; 4936 4937 return; 4938 } 4939 4940 static int 4941 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4942 { 4943 int ret = 0; 4944 bus_addr_t b_addr; 4945 4946 ret = bus_dma_tag_create( 4947 ha->parent_tag,/* parent */ 4948 dma_buf->alignment, 4949 ((bus_size_t)(1ULL << 32)),/* boundary */ 4950 BUS_SPACE_MAXADDR, /* lowaddr */ 4951 BUS_SPACE_MAXADDR, /* highaddr */ 4952 NULL, NULL, /* filter, filterarg */ 4953 dma_buf->size, /* maxsize */ 4954 1, /* nsegments */ 4955 dma_buf->size, /* maxsegsize */ 4956 0, /* flags */ 4957 NULL, NULL, /* lockfunc, lockarg */ 4958 &dma_buf->dma_tag); 4959 4960 if (ret) { 4961 QL_DPRINT1(ha, "could not create dma tag\n"); 4962 goto qlnx_alloc_dmabuf_exit; 4963 } 4964 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4965 (void **)&dma_buf->dma_b, 4966 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4967 &dma_buf->dma_map); 4968 if (ret) { 4969 bus_dma_tag_destroy(dma_buf->dma_tag); 4970 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 4971 goto qlnx_alloc_dmabuf_exit; 4972 } 4973 4974 ret = bus_dmamap_load(dma_buf->dma_tag, 4975 dma_buf->dma_map, 4976 dma_buf->dma_b, 4977 dma_buf->size, 4978 qlnx_dmamap_callback, 4979 &b_addr, BUS_DMA_NOWAIT); 4980 4981 if (ret || !b_addr) { 4982 bus_dma_tag_destroy(dma_buf->dma_tag); 4983 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4984 dma_buf->dma_map); 4985 ret = -1; 4986 goto qlnx_alloc_dmabuf_exit; 4987 } 4988 4989 dma_buf->dma_addr = b_addr; 4990 4991 qlnx_alloc_dmabuf_exit: 4992 4993 return ret; 4994 } 4995 4996 static void 4997 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4998 { 4999 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5000 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5001 bus_dma_tag_destroy(dma_buf->dma_tag); 5002 return; 5003 } 5004 5005 void * 5006 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5007 { 5008 qlnx_dma_t dma_buf; 5009 qlnx_dma_t *dma_p; 5010 qlnx_host_t *ha __unused; 5011 5012 ha = (qlnx_host_t *)ecore_dev; 5013 5014 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5015 5016 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5017 5018 dma_buf.size = size + PAGE_SIZE; 5019 dma_buf.alignment = 8; 5020 5021 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5022 return (NULL); 5023 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5024 5025 *phys = dma_buf.dma_addr; 5026 5027 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 5028 5029 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5030 5031 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5032 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5033 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5034 5035 return 
(dma_buf.dma_b); 5036 } 5037 5038 void 5039 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5040 uint32_t size) 5041 { 5042 qlnx_dma_t dma_buf, *dma_p; 5043 qlnx_host_t *ha; 5044 5045 ha = (qlnx_host_t *)ecore_dev; 5046 5047 if (v_addr == NULL) 5048 return; 5049 5050 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5051 5052 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5053 5054 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5055 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5056 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5057 5058 dma_buf = *dma_p; 5059 5060 if (!ha->qlnxr_debug) 5061 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5062 return; 5063 } 5064 5065 static int 5066 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5067 { 5068 int ret; 5069 device_t dev; 5070 5071 dev = ha->pci_dev; 5072 5073 /* 5074 * Allocate parent DMA Tag 5075 */ 5076 ret = bus_dma_tag_create( 5077 bus_get_dma_tag(dev), /* parent */ 5078 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5079 BUS_SPACE_MAXADDR, /* lowaddr */ 5080 BUS_SPACE_MAXADDR, /* highaddr */ 5081 NULL, NULL, /* filter, filterarg */ 5082 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5083 0, /* nsegments */ 5084 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5085 0, /* flags */ 5086 NULL, NULL, /* lockfunc, lockarg */ 5087 &ha->parent_tag); 5088 5089 if (ret) { 5090 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5091 return (-1); 5092 } 5093 5094 ha->flags.parent_tag = 1; 5095 5096 return (0); 5097 } 5098 5099 static void 5100 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5101 { 5102 if (ha->parent_tag != NULL) { 5103 bus_dma_tag_destroy(ha->parent_tag); 5104 ha->parent_tag = NULL; 5105 } 5106 return; 5107 } 5108 5109 static int 5110 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5111 { 5112 if (bus_dma_tag_create(NULL, /* parent */ 5113 1, 0, /* alignment, bounds */ 5114 BUS_SPACE_MAXADDR, /* lowaddr */ 5115 BUS_SPACE_MAXADDR, /* highaddr */ 5116 NULL, NULL, /* filter, filterarg */ 5117 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5118 QLNX_MAX_SEGMENTS, /* nsegments */ 5119 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5120 0, /* flags */ 5121 NULL, /* lockfunc */ 5122 NULL, /* lockfuncarg */ 5123 &ha->tx_tag)) { 5124 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5125 return (-1); 5126 } 5127 5128 return (0); 5129 } 5130 5131 static void 5132 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5133 { 5134 if (ha->tx_tag != NULL) { 5135 bus_dma_tag_destroy(ha->tx_tag); 5136 ha->tx_tag = NULL; 5137 } 5138 return; 5139 } 5140 5141 static int 5142 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5143 { 5144 if (bus_dma_tag_create(NULL, /* parent */ 5145 1, 0, /* alignment, bounds */ 5146 BUS_SPACE_MAXADDR, /* lowaddr */ 5147 BUS_SPACE_MAXADDR, /* highaddr */ 5148 NULL, NULL, /* filter, filterarg */ 5149 MJUM9BYTES, /* maxsize */ 5150 1, /* nsegments */ 5151 MJUM9BYTES, /* maxsegsize */ 5152 0, /* flags */ 5153 NULL, /* lockfunc */ 5154 NULL, /* lockfuncarg */ 5155 &ha->rx_tag)) { 5156 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5157 5158 return (-1); 5159 } 5160 return (0); 5161 } 5162 5163 static void 5164 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5165 { 5166 if (ha->rx_tag != NULL) { 5167 bus_dma_tag_destroy(ha->rx_tag); 5168 ha->rx_tag = NULL; 5169 } 5170 return; 5171 } 5172 5173 /********************************* 5174 * Exported functions 5175 *********************************/ 5176 uint32_t 5177 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5178 { 5179 uint32_t bar_size; 5180 5181 bar_id = bar_id * 2; 5182 5183 bar_size = 
bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5184 SYS_RES_MEMORY, 5185 PCIR_BAR(bar_id)); 5186 5187 return (bar_size); 5188 } 5189 5190 uint32_t 5191 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5192 { 5193 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5194 pci_reg, 1); 5195 return 0; 5196 } 5197 5198 uint32_t 5199 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5200 uint16_t *reg_value) 5201 { 5202 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5203 pci_reg, 2); 5204 return 0; 5205 } 5206 5207 uint32_t 5208 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5209 uint32_t *reg_value) 5210 { 5211 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5212 pci_reg, 4); 5213 return 0; 5214 } 5215 5216 void 5217 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5218 { 5219 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5220 pci_reg, reg_value, 1); 5221 return; 5222 } 5223 5224 void 5225 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5226 uint16_t reg_value) 5227 { 5228 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5229 pci_reg, reg_value, 2); 5230 return; 5231 } 5232 5233 void 5234 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5235 uint32_t reg_value) 5236 { 5237 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5238 pci_reg, reg_value, 4); 5239 return; 5240 } 5241 5242 int 5243 qlnx_pci_find_capability(void *ecore_dev, int cap) 5244 { 5245 int reg; 5246 qlnx_host_t *ha; 5247 5248 ha = ecore_dev; 5249 5250 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, ®) == 0) 5251 return reg; 5252 else { 5253 QL_DPRINT1(ha, "failed\n"); 5254 return 0; 5255 } 5256 } 5257 5258 int 5259 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5260 { 5261 int reg; 5262 qlnx_host_t *ha; 5263 5264 ha = ecore_dev; 5265 5266 if (pci_find_extcap(ha->pci_dev, ext_cap, ®) == 0) 5267 return reg; 5268 else { 5269 QL_DPRINT1(ha, "failed\n"); 5270 return 0; 5271 } 5272 } 5273 5274 uint32_t 5275 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5276 { 5277 uint32_t data32; 5278 struct ecore_hwfn *p_hwfn; 5279 5280 p_hwfn = hwfn; 5281 5282 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5283 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5284 5285 return (data32); 5286 } 5287 5288 void 5289 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5290 { 5291 struct ecore_hwfn *p_hwfn = hwfn; 5292 5293 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5294 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5295 5296 return; 5297 } 5298 5299 void 5300 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5301 { 5302 struct ecore_hwfn *p_hwfn = hwfn; 5303 5304 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5305 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5306 return; 5307 } 5308 5309 void 5310 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5311 { 5312 struct ecore_dev *cdev; 5313 struct ecore_hwfn *p_hwfn; 5314 uint32_t offset; 5315 5316 p_hwfn = hwfn; 5317 5318 cdev = p_hwfn->p_dev; 5319 5320 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5321 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value); 5322 5323 return; 5324 } 5325 5326 void 5327 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5328 { 5329 struct ecore_hwfn *p_hwfn = hwfn; 5330 5331 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5332 
(bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5333 5334 return; 5335 } 5336 5337 uint32_t 5338 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5339 { 5340 uint32_t data32; 5341 bus_size_t offset; 5342 struct ecore_dev *cdev; 5343 5344 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5345 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5346 5347 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5348 5349 return (data32); 5350 } 5351 5352 void 5353 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5354 { 5355 bus_size_t offset; 5356 struct ecore_dev *cdev; 5357 5358 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5359 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5360 5361 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5362 5363 return; 5364 } 5365 5366 void 5367 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5368 { 5369 bus_size_t offset; 5370 struct ecore_dev *cdev; 5371 5372 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5373 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5374 5375 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5376 return; 5377 } 5378 5379 void * 5380 qlnx_zalloc(uint32_t size) 5381 { 5382 caddr_t va; 5383 5384 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5385 bzero(va, size); 5386 return ((void *)va); 5387 } 5388 5389 void 5390 qlnx_barrier(void *p_hwfn) 5391 { 5392 qlnx_host_t *ha; 5393 5394 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5395 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5396 } 5397 5398 void 5399 qlnx_link_update(void *p_hwfn) 5400 { 5401 qlnx_host_t *ha; 5402 int prev_link_state; 5403 5404 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5405 5406 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5407 5408 prev_link_state = ha->link_up; 5409 ha->link_up = ha->if_link.link_up; 5410 5411 if (prev_link_state != ha->link_up) { 5412 if (ha->link_up) { 5413 if_link_state_change(ha->ifp, LINK_STATE_UP); 5414 } else { 5415 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5416 } 5417 } 5418 #ifndef QLNX_VF 5419 #ifdef CONFIG_ECORE_SRIOV 5420 5421 if (qlnx_vf_device(ha) != 0) { 5422 if (ha->sriov_initialized) 5423 qlnx_inform_vf_link_state(p_hwfn, ha); 5424 } 5425 5426 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5427 #endif /* #ifdef QLNX_VF */ 5428 5429 return; 5430 } 5431 5432 static void 5433 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5434 struct ecore_vf_acquire_sw_info *p_sw_info) 5435 { 5436 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5437 (QLNX_VERSION_MINOR << 16) | 5438 QLNX_VERSION_BUILD; 5439 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5440 5441 return; 5442 } 5443 5444 void 5445 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5446 void *p_sw_info) 5447 { 5448 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5449 5450 return; 5451 } 5452 5453 void 5454 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5455 struct qlnx_link_output *if_link) 5456 { 5457 struct ecore_mcp_link_params link_params; 5458 struct ecore_mcp_link_state link_state; 5459 uint8_t p_change; 5460 struct ecore_ptt *p_ptt = NULL; 5461 5462 memset(if_link, 0, sizeof(*if_link)); 5463 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5464 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5465 5466 ha = (qlnx_host_t *)hwfn->p_dev; 5467 5468 /* Prepare source inputs */ 5469 /* we only deal with physical functions */ 
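/* A PF reads the media type and link parameters from the management FW through a PTT window; a VF instead picks them up from the bulletin board published by its parent PF. */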
5470 if (qlnx_vf_device(ha) != 0) { 5471 p_ptt = ecore_ptt_acquire(hwfn); 5472 5473 if (p_ptt == NULL) { 5474 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5475 return; 5476 } 5477 5478 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5479 ecore_ptt_release(hwfn, p_ptt); 5480 5481 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5482 sizeof(link_params)); 5483 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5484 sizeof(link_state)); 5485 } else { 5486 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5487 ecore_vf_read_bulletin(hwfn, &p_change); 5488 ecore_vf_get_link_params(hwfn, &link_params); 5489 ecore_vf_get_link_state(hwfn, &link_state); 5490 } 5491 5492 /* Set the link parameters to pass to protocol driver */ 5493 if (link_state.link_up) { 5494 if_link->link_up = true; 5495 if_link->speed = link_state.speed; 5496 } 5497 5498 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5499 5500 if (link_params.speed.autoneg) 5501 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5502 5503 if (link_params.pause.autoneg || 5504 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5505 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5506 5507 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5508 link_params.pause.forced_tx) 5509 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5510 5511 if (link_params.speed.advertised_speeds & 5512 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5513 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5514 QLNX_LINK_CAP_1000baseT_Full; 5515 5516 if (link_params.speed.advertised_speeds & 5517 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5518 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5519 5520 if (link_params.speed.advertised_speeds & 5521 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5522 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5523 5524 if (link_params.speed.advertised_speeds & 5525 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5526 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5527 5528 if (link_params.speed.advertised_speeds & 5529 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5530 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5531 5532 if (link_params.speed.advertised_speeds & 5533 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5534 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5535 5536 if_link->advertised_caps = if_link->supported_caps; 5537 5538 if_link->autoneg = link_params.speed.autoneg; 5539 if_link->duplex = QLNX_LINK_DUPLEX; 5540 5541 /* Link partner capabilities */ 5542 5543 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5544 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5545 5546 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5547 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5548 5549 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5550 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5551 5552 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5553 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5554 5555 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5556 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5557 5558 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5559 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5560 5561 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5562 if_link->link_partner_caps |= 
QLNX_LINK_CAP_100000baseKR4_Full; 5563 5564 if (link_state.an_complete) 5565 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5566 5567 if (link_state.partner_adv_pause) 5568 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5569 5570 if ((link_state.partner_adv_pause == 5571 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5572 (link_state.partner_adv_pause == 5573 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5574 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5575 5576 return; 5577 } 5578 5579 void 5580 qlnx_schedule_recovery(void *p_hwfn) 5581 { 5582 qlnx_host_t *ha; 5583 5584 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5585 5586 if (qlnx_vf_device(ha) != 0) { 5587 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5588 } 5589 5590 return; 5591 } 5592 5593 static int 5594 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5595 { 5596 int rc, i; 5597 5598 for (i = 0; i < cdev->num_hwfns; i++) { 5599 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5600 p_hwfn->pf_params = *func_params; 5601 5602 #ifdef QLNX_ENABLE_IWARP 5603 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5604 p_hwfn->using_ll2 = true; 5605 } 5606 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5607 } 5608 5609 rc = ecore_resc_alloc(cdev); 5610 if (rc) 5611 goto qlnx_nic_setup_exit; 5612 5613 ecore_resc_setup(cdev); 5614 5615 qlnx_nic_setup_exit: 5616 5617 return rc; 5618 } 5619 5620 static int 5621 qlnx_nic_start(struct ecore_dev *cdev) 5622 { 5623 int rc; 5624 struct ecore_hw_init_params params; 5625 5626 bzero(¶ms, sizeof (struct ecore_hw_init_params)); 5627 5628 params.p_tunn = NULL; 5629 params.b_hw_start = true; 5630 params.int_mode = cdev->int_mode; 5631 params.allow_npar_tx_switch = true; 5632 params.bin_fw_data = NULL; 5633 5634 rc = ecore_hw_init(cdev, ¶ms); 5635 if (rc) { 5636 ecore_resc_free(cdev); 5637 return rc; 5638 } 5639 5640 return 0; 5641 } 5642 5643 static int 5644 qlnx_slowpath_start(qlnx_host_t *ha) 5645 { 5646 struct ecore_dev *cdev; 5647 struct ecore_pf_params pf_params; 5648 int rc; 5649 5650 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5651 pf_params.eth_pf_params.num_cons = 5652 (ha->num_rss) * (ha->num_tc + 1); 5653 5654 #ifdef QLNX_ENABLE_IWARP 5655 if (qlnx_vf_device(ha) != 0) { 5656 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5657 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5658 pf_params.rdma_pf_params.num_qps = 1024; 5659 pf_params.rdma_pf_params.num_srqs = 1024; 5660 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5661 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5662 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5663 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5664 pf_params.rdma_pf_params.num_qps = 8192; 5665 pf_params.rdma_pf_params.num_srqs = 8192; 5666 //pf_params.rdma_pf_params.min_dpis = 0; 5667 pf_params.rdma_pf_params.min_dpis = 8; 5668 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5669 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5670 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5671 } 5672 } 5673 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5674 5675 cdev = &ha->cdev; 5676 5677 rc = qlnx_nic_setup(cdev, &pf_params); 5678 if (rc) 5679 goto qlnx_slowpath_start_exit; 5680 5681 cdev->int_mode = ECORE_INT_MODE_MSIX; 5682 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5683 5684 #ifdef QLNX_MAX_COALESCE 5685 cdev->rx_coalesce_usecs = 255; 5686 cdev->tx_coalesce_usecs = 255; 5687 #endif 5688 5689 rc = qlnx_nic_start(cdev); 
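/* Cache the interrupt coalescing values the ecore layer settled on. */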
5690 5691 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5692 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5693 5694 #ifdef QLNX_USER_LLDP 5695 (void)qlnx_set_lldp_tlvx(ha, NULL); 5696 #endif /* #ifdef QLNX_USER_LLDP */ 5697 5698 qlnx_slowpath_start_exit: 5699 5700 return (rc); 5701 } 5702 5703 static int 5704 qlnx_slowpath_stop(qlnx_host_t *ha) 5705 { 5706 struct ecore_dev *cdev; 5707 device_t dev = ha->pci_dev; 5708 int i; 5709 5710 cdev = &ha->cdev; 5711 5712 ecore_hw_stop(cdev); 5713 5714 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5715 if (ha->sp_handle[i]) 5716 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5717 ha->sp_handle[i]); 5718 5719 ha->sp_handle[i] = NULL; 5720 5721 if (ha->sp_irq[i]) 5722 (void) bus_release_resource(dev, SYS_RES_IRQ, 5723 ha->sp_irq_rid[i], ha->sp_irq[i]); 5724 ha->sp_irq[i] = NULL; 5725 } 5726 5727 ecore_resc_free(cdev); 5728 5729 return 0; 5730 } 5731 5732 static void 5733 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5734 char ver_str[VER_SIZE]) 5735 { 5736 int i; 5737 5738 memcpy(cdev->name, name, NAME_SIZE); 5739 5740 for_each_hwfn(cdev, i) { 5741 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5742 } 5743 5744 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5745 5746 return ; 5747 } 5748 5749 void 5750 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5751 { 5752 enum ecore_mcp_protocol_type type; 5753 union ecore_mcp_protocol_stats *stats; 5754 struct ecore_eth_stats eth_stats; 5755 qlnx_host_t *ha; 5756 5757 ha = cdev; 5758 stats = proto_stats; 5759 type = proto_type; 5760 5761 switch (type) { 5762 case ECORE_MCP_LAN_STATS: 5763 ecore_get_vport_stats((struct ecore_dev *)cdev, ð_stats); 5764 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5765 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5766 stats->lan_stats.fcs_err = -1; 5767 break; 5768 5769 default: 5770 ha->err_get_proto_invalid_type++; 5771 5772 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5773 break; 5774 } 5775 return; 5776 } 5777 5778 static int 5779 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5780 { 5781 struct ecore_hwfn *p_hwfn; 5782 struct ecore_ptt *p_ptt; 5783 5784 p_hwfn = &ha->cdev.hwfns[0]; 5785 p_ptt = ecore_ptt_acquire(p_hwfn); 5786 5787 if (p_ptt == NULL) { 5788 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5789 return (-1); 5790 } 5791 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5792 5793 ecore_ptt_release(p_hwfn, p_ptt); 5794 5795 return (0); 5796 } 5797 5798 static int 5799 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5800 { 5801 struct ecore_hwfn *p_hwfn; 5802 struct ecore_ptt *p_ptt; 5803 5804 p_hwfn = &ha->cdev.hwfns[0]; 5805 p_ptt = ecore_ptt_acquire(p_hwfn); 5806 5807 if (p_ptt == NULL) { 5808 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5809 return (-1); 5810 } 5811 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5812 5813 ecore_ptt_release(p_hwfn, p_ptt); 5814 5815 return (0); 5816 } 5817 5818 static int 5819 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5820 { 5821 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5822 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5823 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5824 5825 return 0; 5826 } 5827 5828 static void 5829 qlnx_init_fp(qlnx_host_t *ha) 5830 { 5831 int rss_id, txq_array_index, tc; 5832 5833 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5834 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5835 5836 fp->rss_id 
= rss_id; 5837 fp->edev = ha; 5838 fp->sb_info = &ha->sb_array[rss_id]; 5839 fp->rxq = &ha->rxq_array[rss_id]; 5840 fp->rxq->rxq_id = rss_id; 5841 5842 for (tc = 0; tc < ha->num_tc; tc++) { 5843 txq_array_index = tc * ha->num_rss + rss_id; 5844 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5845 fp->txq[tc]->index = txq_array_index; 5846 } 5847 5848 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5849 rss_id); 5850 5851 fp->tx_ring_full = 0; 5852 5853 /* reset all the statistics counters */ 5854 5855 fp->tx_pkts_processed = 0; 5856 fp->tx_pkts_freed = 0; 5857 fp->tx_pkts_transmitted = 0; 5858 fp->tx_pkts_completed = 0; 5859 5860 #ifdef QLNX_TRACE_PERF_DATA 5861 fp->tx_pkts_trans_ctx = 0; 5862 fp->tx_pkts_compl_ctx = 0; 5863 fp->tx_pkts_trans_fp = 0; 5864 fp->tx_pkts_compl_fp = 0; 5865 fp->tx_pkts_compl_intr = 0; 5866 #endif 5867 fp->tx_lso_wnd_min_len = 0; 5868 fp->tx_defrag = 0; 5869 fp->tx_nsegs_gt_elem_left = 0; 5870 fp->tx_tso_max_nsegs = 0; 5871 fp->tx_tso_min_nsegs = 0; 5872 fp->err_tx_nsegs_gt_elem_left = 0; 5873 fp->err_tx_dmamap_create = 0; 5874 fp->err_tx_defrag_dmamap_load = 0; 5875 fp->err_tx_non_tso_max_seg = 0; 5876 fp->err_tx_dmamap_load = 0; 5877 fp->err_tx_defrag = 0; 5878 fp->err_tx_free_pkt_null = 0; 5879 fp->err_tx_cons_idx_conflict = 0; 5880 5881 fp->rx_pkts = 0; 5882 fp->err_m_getcl = 0; 5883 fp->err_m_getjcl = 0; 5884 } 5885 return; 5886 } 5887 5888 void 5889 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5890 { 5891 struct ecore_dev *cdev; 5892 5893 cdev = &ha->cdev; 5894 5895 if (sb_info->sb_virt) { 5896 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5897 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5898 sb_info->sb_virt = NULL; 5899 } 5900 } 5901 5902 static int 5903 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5904 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5905 { 5906 struct ecore_hwfn *p_hwfn; 5907 int hwfn_index, rc; 5908 u16 rel_sb_id; 5909 5910 hwfn_index = sb_id % cdev->num_hwfns; 5911 p_hwfn = &cdev->hwfns[hwfn_index]; 5912 rel_sb_id = sb_id / cdev->num_hwfns; 5913 5914 QL_DPRINT2(((qlnx_host_t *)cdev), 5915 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 5916 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5917 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5918 sb_virt_addr, (void *)sb_phy_addr); 5919 5920 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5921 sb_virt_addr, sb_phy_addr, rel_sb_id); 5922 5923 return rc; 5924 } 5925 5926 /* This function allocates fast-path status block memory */ 5927 int 5928 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5929 { 5930 struct status_block_e4 *sb_virt; 5931 bus_addr_t sb_phys; 5932 int rc; 5933 uint32_t size; 5934 struct ecore_dev *cdev; 5935 5936 cdev = &ha->cdev; 5937 5938 size = sizeof(*sb_virt); 5939 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5940 5941 if (!sb_virt) { 5942 QL_DPRINT1(ha, "Status block allocation failed\n"); 5943 return -ENOMEM; 5944 } 5945 5946 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5947 if (rc) { 5948 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5949 } 5950 5951 return rc; 5952 } 5953 5954 static void 5955 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5956 { 5957 int i; 5958 struct sw_rx_data *rx_buf; 5959 5960 for (i = 0; i < rxq->num_rx_buffers; i++) { 5961 rx_buf = &rxq->sw_rx_ring[i]; 5962 5963 if (rx_buf->data != NULL) { 5964 if (rx_buf->map != NULL) { 5965 
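/* Unload and destroy the DMA map before freeing the mbuf itself. */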
bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5966 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5967 rx_buf->map = NULL; 5968 } 5969 m_freem(rx_buf->data); 5970 rx_buf->data = NULL; 5971 } 5972 } 5973 return; 5974 } 5975 5976 static void 5977 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5978 { 5979 struct ecore_dev *cdev; 5980 int i; 5981 5982 cdev = &ha->cdev; 5983 5984 qlnx_free_rx_buffers(ha, rxq); 5985 5986 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5987 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5988 if (rxq->tpa_info[i].mpf != NULL) 5989 m_freem(rxq->tpa_info[i].mpf); 5990 } 5991 5992 bzero((void *)&rxq->sw_rx_ring[0], 5993 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 5994 5995 /* Free the real RQ ring used by FW */ 5996 if (rxq->rx_bd_ring.p_virt_addr) { 5997 ecore_chain_free(cdev, &rxq->rx_bd_ring); 5998 rxq->rx_bd_ring.p_virt_addr = NULL; 5999 } 6000 6001 /* Free the real completion ring used by FW */ 6002 if (rxq->rx_comp_ring.p_virt_addr && 6003 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6004 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6005 rxq->rx_comp_ring.p_virt_addr = NULL; 6006 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6007 } 6008 6009 #ifdef QLNX_SOFT_LRO 6010 { 6011 struct lro_ctrl *lro; 6012 6013 lro = &rxq->lro; 6014 tcp_lro_free(lro); 6015 } 6016 #endif /* #ifdef QLNX_SOFT_LRO */ 6017 6018 return; 6019 } 6020 6021 static int 6022 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6023 { 6024 register struct mbuf *mp; 6025 uint16_t rx_buf_size; 6026 struct sw_rx_data *sw_rx_data; 6027 struct eth_rx_bd *rx_bd; 6028 dma_addr_t dma_addr; 6029 bus_dmamap_t map; 6030 bus_dma_segment_t segs[1]; 6031 int nsegs; 6032 int ret; 6033 6034 rx_buf_size = rxq->rx_buf_size; 6035 6036 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6037 6038 if (mp == NULL) { 6039 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6040 return -ENOMEM; 6041 } 6042 6043 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6044 6045 map = (bus_dmamap_t)0; 6046 6047 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6048 BUS_DMA_NOWAIT); 6049 dma_addr = segs[0].ds_addr; 6050 6051 if (ret || !dma_addr || (nsegs != 1)) { 6052 m_freem(mp); 6053 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6054 ret, (long long unsigned int)dma_addr, nsegs); 6055 return -ENOMEM; 6056 } 6057 6058 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6059 sw_rx_data->data = mp; 6060 sw_rx_data->dma_addr = dma_addr; 6061 sw_rx_data->map = map; 6062 6063 /* Advance PROD and get BD pointer */ 6064 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6065 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6066 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6067 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6068 6069 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6070 6071 return 0; 6072 } 6073 6074 static int 6075 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6076 struct qlnx_agg_info *tpa) 6077 { 6078 struct mbuf *mp; 6079 dma_addr_t dma_addr; 6080 bus_dmamap_t map; 6081 bus_dma_segment_t segs[1]; 6082 int nsegs; 6083 int ret; 6084 struct sw_rx_data *rx_buf; 6085 6086 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6087 6088 if (mp == NULL) { 6089 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6090 return -ENOMEM; 6091 } 6092 6093 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6094 6095 map = (bus_dmamap_t)0; 6096 6097 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6098 BUS_DMA_NOWAIT); 6099 dma_addr = 
segs[0].ds_addr; 6100 6101 if (ret || !dma_addr || (nsegs != 1)) { 6102 m_freem(mp); 6103 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6104 ret, (long long unsigned int)dma_addr, nsegs); 6105 return -ENOMEM; 6106 } 6107 6108 rx_buf = &tpa->rx_buf; 6109 6110 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6111 6112 rx_buf->data = mp; 6113 rx_buf->dma_addr = dma_addr; 6114 rx_buf->map = map; 6115 6116 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6117 6118 return (0); 6119 } 6120 6121 static void 6122 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6123 { 6124 struct sw_rx_data *rx_buf; 6125 6126 rx_buf = &tpa->rx_buf; 6127 6128 if (rx_buf->data != NULL) { 6129 if (rx_buf->map != NULL) { 6130 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6131 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6132 rx_buf->map = NULL; 6133 } 6134 m_freem(rx_buf->data); 6135 rx_buf->data = NULL; 6136 } 6137 return; 6138 } 6139 6140 /* This function allocates all memory needed per Rx queue */ 6141 static int 6142 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6143 { 6144 int i, rc, num_allocated; 6145 struct ecore_dev *cdev; 6146 6147 cdev = &ha->cdev; 6148 6149 rxq->num_rx_buffers = RX_RING_SIZE; 6150 6151 rxq->rx_buf_size = ha->rx_buf_size; 6152 6153 /* Allocate the parallel driver ring for Rx buffers */ 6154 bzero((void *)&rxq->sw_rx_ring[0], 6155 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6156 6157 /* Allocate FW Rx ring */ 6158 6159 rc = ecore_chain_alloc(cdev, 6160 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6161 ECORE_CHAIN_MODE_NEXT_PTR, 6162 ECORE_CHAIN_CNT_TYPE_U16, 6163 RX_RING_SIZE, 6164 sizeof(struct eth_rx_bd), 6165 &rxq->rx_bd_ring, NULL); 6166 6167 if (rc) 6168 goto err; 6169 6170 /* Allocate FW completion ring */ 6171 rc = ecore_chain_alloc(cdev, 6172 ECORE_CHAIN_USE_TO_CONSUME, 6173 ECORE_CHAIN_MODE_PBL, 6174 ECORE_CHAIN_CNT_TYPE_U16, 6175 RX_RING_SIZE, 6176 sizeof(union eth_rx_cqe), 6177 &rxq->rx_comp_ring, NULL); 6178 6179 if (rc) 6180 goto err; 6181 6182 /* Allocate buffers for the Rx ring */ 6183 6184 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6185 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6186 &rxq->tpa_info[i]); 6187 if (rc) 6188 break; 6189 } 6190 6191 for (i = 0; i < rxq->num_rx_buffers; i++) { 6192 rc = qlnx_alloc_rx_buffer(ha, rxq); 6193 if (rc) 6194 break; 6195 } 6196 num_allocated = i; 6197 if (!num_allocated) { 6198 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6199 goto err; 6200 } else if (num_allocated < rxq->num_rx_buffers) { 6201 QL_DPRINT1(ha, "Allocated less buffers than" 6202 " desired (%d allocated)\n", num_allocated); 6203 } 6204 6205 #ifdef QLNX_SOFT_LRO 6206 6207 { 6208 struct lro_ctrl *lro; 6209 6210 lro = &rxq->lro; 6211 6212 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6213 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6214 rxq->rxq_id); 6215 goto err; 6216 } 6217 6218 lro->ifp = ha->ifp; 6219 } 6220 #endif /* #ifdef QLNX_SOFT_LRO */ 6221 return 0; 6222 6223 err: 6224 qlnx_free_mem_rxq(ha, rxq); 6225 return -ENOMEM; 6226 } 6227 6228 static void 6229 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6230 struct qlnx_tx_queue *txq) 6231 { 6232 struct ecore_dev *cdev; 6233 6234 cdev = &ha->cdev; 6235 6236 bzero((void *)&txq->sw_tx_ring[0], 6237 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6238 6239 /* Free the real RQ ring used by FW */ 6240 if (txq->tx_pbl.p_virt_addr) { 6241 ecore_chain_free(cdev, &txq->tx_pbl); 6242 txq->tx_pbl.p_virt_addr = NULL; 6243 } 6244 return; 6245 } 6246 6247 /* This 
function allocates all memory needed per Tx queue */ 6248 static int 6249 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6250 struct qlnx_tx_queue *txq) 6251 { 6252 int ret = ECORE_SUCCESS; 6253 union eth_tx_bd_types *p_virt; 6254 struct ecore_dev *cdev; 6255 6256 cdev = &ha->cdev; 6257 6258 bzero((void *)&txq->sw_tx_ring[0], 6259 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6260 6261 /* Allocate the real Tx ring to be used by FW */ 6262 ret = ecore_chain_alloc(cdev, 6263 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6264 ECORE_CHAIN_MODE_PBL, 6265 ECORE_CHAIN_CNT_TYPE_U16, 6266 TX_RING_SIZE, 6267 sizeof(*p_virt), 6268 &txq->tx_pbl, NULL); 6269 6270 if (ret != ECORE_SUCCESS) { 6271 goto err; 6272 } 6273 6274 txq->num_tx_buffers = TX_RING_SIZE; 6275 6276 return 0; 6277 6278 err: 6279 qlnx_free_mem_txq(ha, fp, txq); 6280 return -ENOMEM; 6281 } 6282 6283 static void 6284 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6285 { 6286 struct mbuf *mp; 6287 if_t ifp = ha->ifp; 6288 6289 if (mtx_initialized(&fp->tx_mtx)) { 6290 if (fp->tx_br != NULL) { 6291 mtx_lock(&fp->tx_mtx); 6292 6293 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6294 fp->tx_pkts_freed++; 6295 m_freem(mp); 6296 } 6297 6298 mtx_unlock(&fp->tx_mtx); 6299 6300 buf_ring_free(fp->tx_br, M_DEVBUF); 6301 fp->tx_br = NULL; 6302 } 6303 mtx_destroy(&fp->tx_mtx); 6304 } 6305 return; 6306 } 6307 6308 static void 6309 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6310 { 6311 int tc; 6312 6313 qlnx_free_mem_sb(ha, fp->sb_info); 6314 6315 qlnx_free_mem_rxq(ha, fp->rxq); 6316 6317 for (tc = 0; tc < ha->num_tc; tc++) 6318 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6319 6320 return; 6321 } 6322 6323 static int 6324 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6325 { 6326 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6327 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6328 6329 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6330 6331 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6332 M_NOWAIT, &fp->tx_mtx); 6333 if (fp->tx_br == NULL) { 6334 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6335 ha->dev_unit, fp->rss_id); 6336 return -ENOMEM; 6337 } 6338 return 0; 6339 } 6340 6341 static int 6342 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6343 { 6344 int rc, tc; 6345 6346 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6347 if (rc) 6348 goto err; 6349 6350 if (ha->rx_jumbo_buf_eq_mtu) { 6351 if (ha->max_frame_size <= MCLBYTES) 6352 ha->rx_buf_size = MCLBYTES; 6353 else if (ha->max_frame_size <= MJUMPAGESIZE) 6354 ha->rx_buf_size = MJUMPAGESIZE; 6355 else if (ha->max_frame_size <= MJUM9BYTES) 6356 ha->rx_buf_size = MJUM9BYTES; 6357 else if (ha->max_frame_size <= MJUM16BYTES) 6358 ha->rx_buf_size = MJUM16BYTES; 6359 } else { 6360 if (ha->max_frame_size <= MCLBYTES) 6361 ha->rx_buf_size = MCLBYTES; 6362 else 6363 ha->rx_buf_size = MJUMPAGESIZE; 6364 } 6365 6366 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6367 if (rc) 6368 goto err; 6369 6370 for (tc = 0; tc < ha->num_tc; tc++) { 6371 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6372 if (rc) 6373 goto err; 6374 } 6375 6376 return 0; 6377 6378 err: 6379 qlnx_free_mem_fp(ha, fp); 6380 return -ENOMEM; 6381 } 6382 6383 static void 6384 qlnx_free_mem_load(qlnx_host_t *ha) 6385 { 6386 int i; 6387 6388 for (i = 0; i < ha->num_rss; i++) { 6389 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6390 6391 qlnx_free_mem_fp(ha, fp); 6392 } 6393 return; 6394 } 6395 6396 static int 6397 qlnx_alloc_mem_load(qlnx_host_t *ha) 
6398 { 6399 int rc = 0, rss_id; 6400 6401 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6402 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6403 6404 rc = qlnx_alloc_mem_fp(ha, fp); 6405 if (rc) 6406 break; 6407 } 6408 return (rc); 6409 } 6410 6411 static int 6412 qlnx_start_vport(struct ecore_dev *cdev, 6413 u8 vport_id, 6414 u16 mtu, 6415 u8 drop_ttl0_flg, 6416 u8 inner_vlan_removal_en_flg, 6417 u8 tx_switching, 6418 u8 hw_lro_enable) 6419 { 6420 int rc, i; 6421 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6422 qlnx_host_t *ha __unused; 6423 6424 ha = (qlnx_host_t *)cdev; 6425 6426 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6427 vport_start_params.tx_switching = 0; 6428 vport_start_params.handle_ptp_pkts = 0; 6429 vport_start_params.only_untagged = 0; 6430 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6431 6432 vport_start_params.tpa_mode = 6433 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6434 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6435 6436 vport_start_params.vport_id = vport_id; 6437 vport_start_params.mtu = mtu; 6438 6439 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6440 6441 for_each_hwfn(cdev, i) { 6442 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6443 6444 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6445 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6446 6447 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6448 6449 if (rc) { 6450 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6451 " with MTU %d\n" , vport_id, mtu); 6452 return -ENOMEM; 6453 } 6454 6455 ecore_hw_start_fastpath(p_hwfn); 6456 6457 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6458 vport_id, mtu); 6459 } 6460 return 0; 6461 } 6462 6463 static int 6464 qlnx_update_vport(struct ecore_dev *cdev, 6465 struct qlnx_update_vport_params *params) 6466 { 6467 struct ecore_sp_vport_update_params sp_params; 6468 int rc, i, j, fp_index; 6469 struct ecore_hwfn *p_hwfn; 6470 struct ecore_rss_params *rss; 6471 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6472 struct qlnx_fastpath *fp; 6473 6474 memset(&sp_params, 0, sizeof(sp_params)); 6475 /* Translate protocol params into sp params */ 6476 sp_params.vport_id = params->vport_id; 6477 6478 sp_params.update_vport_active_rx_flg = 6479 params->update_vport_active_rx_flg; 6480 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6481 6482 sp_params.update_vport_active_tx_flg = 6483 params->update_vport_active_tx_flg; 6484 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6485 6486 sp_params.update_inner_vlan_removal_flg = 6487 params->update_inner_vlan_removal_flg; 6488 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6489 6490 sp_params.sge_tpa_params = params->sge_tpa_params; 6491 6492 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6493 * We need to re-fix the rss values per engine for CMT. 
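 * Illustrative example (added note, not in the original source): with
 * cdev->num_hwfns = 2 and ha->num_rss = 4, indirection-table entry j for
 * engine i below resolves to fastpath ((2 * j) + i) % 4, so engine 0's
 * table only ever points at fp 0 and fp 2 while engine 1's points only at
 * fp 1 and fp 3, i.e. each engine references just the Rx queues that were
 * started on that engine.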
6494 */ 6495 if (params->rss_params->update_rss_config) 6496 sp_params.rss_params = params->rss_params; 6497 else 6498 sp_params.rss_params = NULL; 6499 6500 for_each_hwfn(cdev, i) { 6501 p_hwfn = &cdev->hwfns[i]; 6502 6503 if ((cdev->num_hwfns > 1) && 6504 params->rss_params->update_rss_config && 6505 params->rss_params->rss_enable) { 6506 rss = params->rss_params; 6507 6508 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6509 fp_index = ((cdev->num_hwfns * j) + i) % 6510 ha->num_rss; 6511 6512 fp = &ha->fp_array[fp_index]; 6513 rss->rss_ind_table[j] = fp->rxq->handle; 6514 } 6515 6516 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6517 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6518 rss->rss_ind_table[j], 6519 rss->rss_ind_table[j+1], 6520 rss->rss_ind_table[j+2], 6521 rss->rss_ind_table[j+3], 6522 rss->rss_ind_table[j+4], 6523 rss->rss_ind_table[j+5], 6524 rss->rss_ind_table[j+6], 6525 rss->rss_ind_table[j+7]); 6526 j += 8; 6527 } 6528 } 6529 6530 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6531 6532 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6533 6534 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6535 ECORE_SPQ_MODE_EBLOCK, NULL); 6536 if (rc) { 6537 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6538 return rc; 6539 } 6540 6541 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6542 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6543 params->vport_id, params->vport_active_tx_flg, 6544 params->vport_active_rx_flg, 6545 params->update_vport_active_tx_flg, 6546 params->update_vport_active_rx_flg); 6547 } 6548 6549 return 0; 6550 } 6551 6552 static void 6553 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6554 { 6555 struct eth_rx_bd *rx_bd_cons = 6556 ecore_chain_consume(&rxq->rx_bd_ring); 6557 struct eth_rx_bd *rx_bd_prod = 6558 ecore_chain_produce(&rxq->rx_bd_ring); 6559 struct sw_rx_data *sw_rx_data_cons = 6560 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6561 struct sw_rx_data *sw_rx_data_prod = 6562 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6563 6564 sw_rx_data_prod->data = sw_rx_data_cons->data; 6565 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6566 6567 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6568 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6569 6570 return; 6571 } 6572 6573 static void 6574 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6575 { 6576 6577 uint16_t bd_prod; 6578 uint16_t cqe_prod; 6579 union { 6580 struct eth_rx_prod_data rx_prod_data; 6581 uint32_t data32; 6582 } rx_prods; 6583 6584 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6585 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6586 6587 /* Update producers */ 6588 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6589 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6590 6591 /* Make sure that the BD and SGE data is updated before updating the 6592 * producers since FW might read the BD/SGE right after the producer 6593 * is updated. 6594 */ 6595 wmb(); 6596 6597 #ifdef ECORE_CONFIG_DIRECT_HWFN 6598 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6599 sizeof(rx_prods), &rx_prods.data32); 6600 #else 6601 internal_ram_wr(rxq->hw_rxq_prod_addr, 6602 sizeof(rx_prods), &rx_prods.data32); 6603 #endif 6604 6605 /* mmiowb is needed to synchronize doorbell writes from more than one 6606 * processor. It guarantees that the write arrives to the device before 6607 * the napi lock is released and another qlnx_poll is called (possibly 6608 * on another CPU). 
Without this barrier, the next doorbell can bypass 6609 * this doorbell. This is applicable to IA64/Altix systems. 6610 */ 6611 wmb(); 6612 6613 return; 6614 } 6615 6616 static uint32_t qlnx_hash_key[] = { 6617 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6618 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6619 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6620 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6621 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6622 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6623 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6624 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6625 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6626 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6627 6628 static int 6629 qlnx_start_queues(qlnx_host_t *ha) 6630 { 6631 int rc, tc, i, vport_id = 0, 6632 drop_ttl0_flg = 1, vlan_removal_en = 1, 6633 tx_switching = 0, hw_lro_enable = 0; 6634 struct ecore_dev *cdev = &ha->cdev; 6635 struct ecore_rss_params *rss_params = &ha->rss_params; 6636 struct qlnx_update_vport_params vport_update_params; 6637 if_t ifp; 6638 struct ecore_hwfn *p_hwfn; 6639 struct ecore_sge_tpa_params tpa_params; 6640 struct ecore_queue_start_common_params qparams; 6641 struct qlnx_fastpath *fp; 6642 6643 ifp = ha->ifp; 6644 6645 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6646 6647 if (!ha->num_rss) { 6648 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6649 " are no Rx queues\n"); 6650 return -EINVAL; 6651 } 6652 6653 #ifndef QLNX_SOFT_LRO 6654 hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO; 6655 #endif /* #ifndef QLNX_SOFT_LRO */ 6656 6657 rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg, 6658 vlan_removal_en, tx_switching, hw_lro_enable); 6659 6660 if (rc) { 6661 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6662 return rc; 6663 } 6664 6665 QL_DPRINT2(ha, "Start vport ramrod passed, " 6666 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6667 vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en); 6668 6669 for_each_rss(i) { 6670 struct ecore_rxq_start_ret_params rx_ret_params; 6671 struct ecore_txq_start_ret_params tx_ret_params; 6672 6673 fp = &ha->fp_array[i]; 6674 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6675 6676 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6677 bzero(&rx_ret_params, 6678 sizeof (struct ecore_rxq_start_ret_params)); 6679 6680 qparams.queue_id = i ; 6681 qparams.vport_id = vport_id; 6682 qparams.stats_id = vport_id; 6683 qparams.p_sb = fp->sb_info; 6684 qparams.sb_idx = RX_PI; 6685 6686 6687 rc = ecore_eth_rx_queue_start(p_hwfn, 6688 p_hwfn->hw_info.opaque_fid, 6689 &qparams, 6690 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6691 /* bd_chain_phys_addr */ 6692 fp->rxq->rx_bd_ring.p_phys_addr, 6693 /* cqe_pbl_addr */ 6694 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6695 /* cqe_pbl_size */ 6696 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6697 &rx_ret_params); 6698 6699 if (rc) { 6700 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6701 return rc; 6702 } 6703 6704 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6705 fp->rxq->handle = rx_ret_params.p_handle; 6706 fp->rxq->hw_cons_ptr = 6707 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6708 6709 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6710 6711 for (tc = 0; tc < ha->num_tc; tc++) { 6712 struct qlnx_tx_queue *txq = fp->txq[tc]; 6713 6714 bzero(&qparams, 6715 sizeof(struct ecore_queue_start_common_params)); 6716 bzero(&tx_ret_params, 6717 sizeof (struct ecore_txq_start_ret_params)); 6718 6719 qparams.queue_id = 
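/* Added note on the expression that follows: on a single-hwfn device this
 * is simply txq->index; on CMT (dual-hwfn) parts the global txq indices
 * appear to be interleaved across the two engines, so dividing by
 * cdev->num_hwfns yields the queue id as seen by this hwfn. Descriptive
 * comment only, no behavioral change. */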
txq->index / cdev->num_hwfns ; 6720 qparams.vport_id = vport_id; 6721 qparams.stats_id = vport_id; 6722 qparams.p_sb = fp->sb_info; 6723 qparams.sb_idx = TX_PI(tc); 6724 6725 rc = ecore_eth_tx_queue_start(p_hwfn, 6726 p_hwfn->hw_info.opaque_fid, 6727 &qparams, tc, 6728 /* bd_chain_phys_addr */ 6729 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6730 ecore_chain_get_page_cnt(&txq->tx_pbl), 6731 &tx_ret_params); 6732 6733 if (rc) { 6734 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6735 txq->index, rc); 6736 return rc; 6737 } 6738 6739 txq->doorbell_addr = tx_ret_params.p_doorbell; 6740 txq->handle = tx_ret_params.p_handle; 6741 6742 txq->hw_cons_ptr = 6743 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6744 SET_FIELD(txq->tx_db.data.params, 6745 ETH_DB_DATA_DEST, DB_DEST_XCM); 6746 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6747 DB_AGG_CMD_SET); 6748 SET_FIELD(txq->tx_db.data.params, 6749 ETH_DB_DATA_AGG_VAL_SEL, 6750 DQ_XCM_ETH_TX_BD_PROD_CMD); 6751 6752 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6753 } 6754 } 6755 6756 /* Fill struct with RSS params */ 6757 if (ha->num_rss > 1) { 6758 rss_params->update_rss_config = 1; 6759 rss_params->rss_enable = 1; 6760 rss_params->update_rss_capabilities = 1; 6761 rss_params->update_rss_ind_table = 1; 6762 rss_params->update_rss_key = 1; 6763 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6764 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6765 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6766 6767 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6768 fp = &ha->fp_array[(i % ha->num_rss)]; 6769 rss_params->rss_ind_table[i] = fp->rxq->handle; 6770 } 6771 6772 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6773 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6774 6775 } else { 6776 memset(rss_params, 0, sizeof(*rss_params)); 6777 } 6778 6779 /* Prepare and send the vport enable */ 6780 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6781 vport_update_params.vport_id = vport_id; 6782 vport_update_params.update_vport_active_tx_flg = 1; 6783 vport_update_params.vport_active_tx_flg = 1; 6784 vport_update_params.update_vport_active_rx_flg = 1; 6785 vport_update_params.vport_active_rx_flg = 1; 6786 vport_update_params.rss_params = rss_params; 6787 vport_update_params.update_inner_vlan_removal_flg = 1; 6788 vport_update_params.inner_vlan_removal_flg = 1; 6789 6790 if (hw_lro_enable) { 6791 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6792 6793 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6794 6795 tpa_params.update_tpa_en_flg = 1; 6796 tpa_params.tpa_ipv4_en_flg = 1; 6797 tpa_params.tpa_ipv6_en_flg = 1; 6798 6799 tpa_params.update_tpa_param_flg = 1; 6800 tpa_params.tpa_pkt_split_flg = 0; 6801 tpa_params.tpa_hdr_data_split_flg = 0; 6802 tpa_params.tpa_gro_consistent_flg = 0; 6803 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6804 tpa_params.tpa_max_size = (uint16_t)(-1); 6805 tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2; 6806 tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2; 6807 6808 vport_update_params.sge_tpa_params = &tpa_params; 6809 } 6810 6811 rc = qlnx_update_vport(cdev, &vport_update_params); 6812 if (rc) { 6813 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6814 return rc; 6815 } 6816 6817 return 0; 6818 } 6819 6820 static int 6821 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6822 struct qlnx_tx_queue *txq) 6823 { 6824 uint16_t hw_bd_cons; 6825 uint16_t ecore_cons_idx; 6826 6827 QL_DPRINT2(ha, "enter\n"); 6828 6829 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 
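/* Drain-loop note (added comment): the loop below polls until the
 * consumer index that the firmware reports through the status block
 * (*txq->hw_cons_ptr) matches the driver's chain consumer index,
 * reclaiming completed BDs via qlnx_tx_int() under fp->tx_mtx and
 * backing off 2ms between polls. Once every posted BD has completed,
 * ecore_chain_get_cons_idx(&txq->tx_pbl) equals hw_bd_cons and the
 * queue is considered drained. */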
6830 6831 while (hw_bd_cons != 6832 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6833 mtx_lock(&fp->tx_mtx); 6834 6835 (void)qlnx_tx_int(ha, fp, txq); 6836 6837 mtx_unlock(&fp->tx_mtx); 6838 6839 qlnx_mdelay(__func__, 2); 6840 6841 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6842 } 6843 6844 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6845 6846 return 0; 6847 } 6848 6849 static int 6850 qlnx_stop_queues(qlnx_host_t *ha) 6851 { 6852 struct qlnx_update_vport_params vport_update_params; 6853 struct ecore_dev *cdev; 6854 struct qlnx_fastpath *fp; 6855 int rc, tc, i; 6856 6857 cdev = &ha->cdev; 6858 6859 /* Disable the vport */ 6860 6861 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6862 6863 vport_update_params.vport_id = 0; 6864 vport_update_params.update_vport_active_tx_flg = 1; 6865 vport_update_params.vport_active_tx_flg = 0; 6866 vport_update_params.update_vport_active_rx_flg = 1; 6867 vport_update_params.vport_active_rx_flg = 0; 6868 vport_update_params.rss_params = &ha->rss_params; 6869 vport_update_params.rss_params->update_rss_config = 0; 6870 vport_update_params.rss_params->rss_enable = 0; 6871 vport_update_params.update_inner_vlan_removal_flg = 0; 6872 vport_update_params.inner_vlan_removal_flg = 0; 6873 6874 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6875 6876 rc = qlnx_update_vport(cdev, &vport_update_params); 6877 if (rc) { 6878 QL_DPRINT1(ha, "Failed to update vport\n"); 6879 return rc; 6880 } 6881 6882 /* Flush Tx queues. If needed, request drain from MCP */ 6883 for_each_rss(i) { 6884 fp = &ha->fp_array[i]; 6885 6886 for (tc = 0; tc < ha->num_tc; tc++) { 6887 struct qlnx_tx_queue *txq = fp->txq[tc]; 6888 6889 rc = qlnx_drain_txq(ha, fp, txq); 6890 if (rc) 6891 return rc; 6892 } 6893 } 6894 6895 /* Stop all Queues in reverse order*/ 6896 for (i = ha->num_rss - 1; i >= 0; i--) { 6897 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6898 6899 fp = &ha->fp_array[i]; 6900 6901 /* Stop the Tx Queue(s)*/ 6902 for (tc = 0; tc < ha->num_tc; tc++) { 6903 int tx_queue_id __unused; 6904 6905 tx_queue_id = tc * ha->num_rss + i; 6906 rc = ecore_eth_tx_queue_stop(p_hwfn, 6907 fp->txq[tc]->handle); 6908 6909 if (rc) { 6910 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 6911 tx_queue_id); 6912 return rc; 6913 } 6914 } 6915 6916 /* Stop the Rx Queue*/ 6917 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6918 false); 6919 if (rc) { 6920 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 6921 return rc; 6922 } 6923 } 6924 6925 /* Stop the vport */ 6926 for_each_hwfn(cdev, i) { 6927 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6928 6929 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6930 6931 if (rc) { 6932 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 6933 return rc; 6934 } 6935 } 6936 6937 return rc; 6938 } 6939 6940 static int 6941 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6942 enum ecore_filter_opcode opcode, 6943 unsigned char mac[ETH_ALEN]) 6944 { 6945 struct ecore_filter_ucast ucast; 6946 struct ecore_dev *cdev; 6947 int rc; 6948 6949 cdev = &ha->cdev; 6950 6951 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6952 6953 ucast.opcode = opcode; 6954 ucast.type = ECORE_FILTER_MAC; 6955 ucast.is_rx_filter = 1; 6956 ucast.vport_to_add_to = 0; 6957 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6958 6959 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6960 6961 return (rc); 6962 } 6963 6964 static int 6965 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6966 { 6967 struct 
ecore_filter_ucast ucast; 6968 struct ecore_dev *cdev; 6969 int rc; 6970 6971 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6972 6973 ucast.opcode = ECORE_FILTER_REPLACE; 6974 ucast.type = ECORE_FILTER_MAC; 6975 ucast.is_rx_filter = 1; 6976 6977 cdev = &ha->cdev; 6978 6979 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6980 6981 return (rc); 6982 } 6983 6984 static int 6985 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6986 { 6987 struct ecore_filter_mcast *mcast; 6988 struct ecore_dev *cdev; 6989 int rc, i; 6990 6991 cdev = &ha->cdev; 6992 6993 mcast = &ha->ecore_mcast; 6994 bzero(mcast, sizeof(struct ecore_filter_mcast)); 6995 6996 mcast->opcode = ECORE_FILTER_REMOVE; 6997 6998 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 6999 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7000 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7001 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7002 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7003 mcast->num_mc_addrs++; 7004 } 7005 } 7006 mcast = &ha->ecore_mcast; 7007 7008 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7009 7010 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7011 ha->nmcast = 0; 7012 7013 return (rc); 7014 } 7015 7016 static int 7017 qlnx_clean_filters(qlnx_host_t *ha) 7018 { 7019 int rc = 0; 7020 7021 /* Remove all unicast macs */ 7022 rc = qlnx_remove_all_ucast_mac(ha); 7023 if (rc) 7024 return rc; 7025 7026 /* Remove all multicast macs */ 7027 rc = qlnx_remove_all_mcast_mac(ha); 7028 if (rc) 7029 return rc; 7030 7031 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7032 7033 return (rc); 7034 } 7035 7036 static int 7037 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7038 { 7039 struct ecore_filter_accept_flags accept; 7040 int rc = 0; 7041 struct ecore_dev *cdev; 7042 7043 cdev = &ha->cdev; 7044 7045 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7046 7047 accept.update_rx_mode_config = 1; 7048 accept.rx_accept_filter = filter; 7049 7050 accept.update_tx_mode_config = 1; 7051 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7052 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7053 7054 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7055 ECORE_SPQ_MODE_CB, NULL); 7056 7057 return (rc); 7058 } 7059 7060 static int 7061 qlnx_set_rx_mode(qlnx_host_t *ha) 7062 { 7063 int rc = 0; 7064 uint8_t filter; 7065 7066 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7067 if (rc) 7068 return rc; 7069 7070 rc = qlnx_remove_all_mcast_mac(ha); 7071 if (rc) 7072 return rc; 7073 7074 filter = ECORE_ACCEPT_UCAST_MATCHED | 7075 ECORE_ACCEPT_MCAST_MATCHED | 7076 ECORE_ACCEPT_BCAST; 7077 7078 if (qlnx_vf_device(ha) == 0) { 7079 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7080 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7081 } 7082 ha->filter = filter; 7083 7084 rc = qlnx_set_rx_accept_filter(ha, filter); 7085 7086 return (rc); 7087 } 7088 7089 static int 7090 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7091 { 7092 int i, rc = 0; 7093 struct ecore_dev *cdev; 7094 struct ecore_hwfn *hwfn; 7095 struct ecore_ptt *ptt; 7096 7097 if (qlnx_vf_device(ha) == 0) 7098 return (0); 7099 7100 cdev = &ha->cdev; 7101 7102 for_each_hwfn(cdev, i) { 7103 hwfn = &cdev->hwfns[i]; 7104 7105 ptt = ecore_ptt_acquire(hwfn); 7106 if (!ptt) 7107 return -EBUSY; 7108 7109 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7110 7111 ecore_ptt_release(hwfn, ptt); 7112 7113 if (rc) 7114 return rc; 7115 } 7116 return (rc); 
7117 } 7118 7119 static uint64_t 7120 qlnx_get_counter(if_t ifp, ift_counter cnt) 7121 { 7122 qlnx_host_t *ha; 7123 uint64_t count; 7124 7125 ha = (qlnx_host_t *)if_getsoftc(ifp); 7126 7127 switch (cnt) { 7128 case IFCOUNTER_IPACKETS: 7129 count = ha->hw_stats.common.rx_ucast_pkts + 7130 ha->hw_stats.common.rx_mcast_pkts + 7131 ha->hw_stats.common.rx_bcast_pkts; 7132 break; 7133 7134 case IFCOUNTER_IERRORS: 7135 count = ha->hw_stats.common.rx_crc_errors + 7136 ha->hw_stats.common.rx_align_errors + 7137 ha->hw_stats.common.rx_oversize_packets + 7138 ha->hw_stats.common.rx_undersize_packets; 7139 break; 7140 7141 case IFCOUNTER_OPACKETS: 7142 count = ha->hw_stats.common.tx_ucast_pkts + 7143 ha->hw_stats.common.tx_mcast_pkts + 7144 ha->hw_stats.common.tx_bcast_pkts; 7145 break; 7146 7147 case IFCOUNTER_OERRORS: 7148 count = ha->hw_stats.common.tx_err_drop_pkts; 7149 break; 7150 7151 case IFCOUNTER_COLLISIONS: 7152 return (0); 7153 7154 case IFCOUNTER_IBYTES: 7155 count = ha->hw_stats.common.rx_ucast_bytes + 7156 ha->hw_stats.common.rx_mcast_bytes + 7157 ha->hw_stats.common.rx_bcast_bytes; 7158 break; 7159 7160 case IFCOUNTER_OBYTES: 7161 count = ha->hw_stats.common.tx_ucast_bytes + 7162 ha->hw_stats.common.tx_mcast_bytes + 7163 ha->hw_stats.common.tx_bcast_bytes; 7164 break; 7165 7166 case IFCOUNTER_IMCASTS: 7167 count = ha->hw_stats.common.rx_mcast_bytes; 7168 break; 7169 7170 case IFCOUNTER_OMCASTS: 7171 count = ha->hw_stats.common.tx_mcast_bytes; 7172 break; 7173 7174 case IFCOUNTER_IQDROPS: 7175 case IFCOUNTER_OQDROPS: 7176 case IFCOUNTER_NOPROTO: 7177 7178 default: 7179 return (if_get_counter_default(ifp, cnt)); 7180 } 7181 return (count); 7182 } 7183 7184 static void 7185 qlnx_timer(void *arg) 7186 { 7187 qlnx_host_t *ha; 7188 7189 ha = (qlnx_host_t *)arg; 7190 7191 if (ha->error_recovery) { 7192 ha->error_recovery = 0; 7193 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7194 return; 7195 } 7196 7197 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7198 7199 if (ha->storm_stats_gather) 7200 qlnx_sample_storm_stats(ha); 7201 7202 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7203 7204 return; 7205 } 7206 7207 static int 7208 qlnx_load(qlnx_host_t *ha) 7209 { 7210 int i; 7211 int rc = 0; 7212 device_t dev; 7213 7214 dev = ha->pci_dev; 7215 7216 QL_DPRINT2(ha, "enter\n"); 7217 7218 rc = qlnx_alloc_mem_arrays(ha); 7219 if (rc) 7220 goto qlnx_load_exit0; 7221 7222 qlnx_init_fp(ha); 7223 7224 rc = qlnx_alloc_mem_load(ha); 7225 if (rc) 7226 goto qlnx_load_exit1; 7227 7228 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7229 ha->num_rss, ha->num_tc); 7230 7231 for (i = 0; i < ha->num_rss; i++) { 7232 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7233 (INTR_TYPE_NET | INTR_MPSAFE), 7234 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7235 &ha->irq_vec[i].handle))) { 7236 QL_DPRINT1(ha, "could not setup interrupt\n"); 7237 goto qlnx_load_exit2; 7238 } 7239 7240 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 7241 irq %p handle %p\n", i, 7242 ha->irq_vec[i].irq_rid, 7243 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7244 7245 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7246 } 7247 7248 rc = qlnx_start_queues(ha); 7249 if (rc) 7250 goto qlnx_load_exit2; 7251 7252 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); 7253 7254 /* Add primary mac and set Rx filters */ 7255 rc = qlnx_set_rx_mode(ha); 7256 if (rc) 7257 goto qlnx_load_exit2; 7258 7259 /* Ask for link-up using current configuration */ 7260 qlnx_set_link(ha, true); 7261 7262 if (qlnx_vf_device(ha) == 0) 7263 
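/* Added note: qlnx_vf_device() returning 0 apparently identifies a VF
 * instance, so this call pulls the current link state explicitly at
 * load time for VFs. Interpretive comment only. */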
qlnx_link_update(&ha->cdev.hwfns[0]); 7264 7265 ha->state = QLNX_STATE_OPEN; 7266 7267 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7268 7269 if (ha->flags.callout_init) 7270 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7271 7272 goto qlnx_load_exit0; 7273 7274 qlnx_load_exit2: 7275 qlnx_free_mem_load(ha); 7276 7277 qlnx_load_exit1: 7278 ha->num_rss = 0; 7279 7280 qlnx_load_exit0: 7281 QL_DPRINT2(ha, "exit [%d]\n", rc); 7282 return rc; 7283 } 7284 7285 static void 7286 qlnx_drain_soft_lro(qlnx_host_t *ha) 7287 { 7288 #ifdef QLNX_SOFT_LRO 7289 7290 if_t ifp; 7291 int i; 7292 7293 ifp = ha->ifp; 7294 7295 if (if_getcapenable(ifp) & IFCAP_LRO) { 7296 for (i = 0; i < ha->num_rss; i++) { 7297 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7298 struct lro_ctrl *lro; 7299 7300 lro = &fp->rxq->lro; 7301 7302 tcp_lro_flush_all(lro); 7303 } 7304 } 7305 7306 #endif /* #ifdef QLNX_SOFT_LRO */ 7307 7308 return; 7309 } 7310 7311 static void 7312 qlnx_unload(qlnx_host_t *ha) 7313 { 7314 struct ecore_dev *cdev; 7315 device_t dev; 7316 int i; 7317 7318 cdev = &ha->cdev; 7319 dev = ha->pci_dev; 7320 7321 QL_DPRINT2(ha, "enter\n"); 7322 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7323 7324 if (ha->state == QLNX_STATE_OPEN) { 7325 qlnx_set_link(ha, false); 7326 qlnx_clean_filters(ha); 7327 qlnx_stop_queues(ha); 7328 ecore_hw_stop_fastpath(cdev); 7329 7330 for (i = 0; i < ha->num_rss; i++) { 7331 if (ha->irq_vec[i].handle) { 7332 (void)bus_teardown_intr(dev, 7333 ha->irq_vec[i].irq, 7334 ha->irq_vec[i].handle); 7335 ha->irq_vec[i].handle = NULL; 7336 } 7337 } 7338 7339 qlnx_drain_fp_taskqueues(ha); 7340 qlnx_drain_soft_lro(ha); 7341 qlnx_free_mem_load(ha); 7342 } 7343 7344 if (ha->flags.callout_init) 7345 callout_drain(&ha->qlnx_callout); 7346 7347 qlnx_mdelay(__func__, 1000); 7348 7349 ha->state = QLNX_STATE_CLOSED; 7350 7351 QL_DPRINT2(ha, "exit\n"); 7352 return; 7353 } 7354 7355 static int 7356 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7357 { 7358 int rval = -1; 7359 struct ecore_hwfn *p_hwfn; 7360 struct ecore_ptt *p_ptt; 7361 7362 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7363 7364 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7365 p_ptt = ecore_ptt_acquire(p_hwfn); 7366 7367 if (!p_ptt) { 7368 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7369 return (rval); 7370 } 7371 7372 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7373 7374 if (rval == DBG_STATUS_OK) 7375 rval = 0; 7376 else { 7377 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7378 "[0x%x]\n", rval); 7379 } 7380 7381 ecore_ptt_release(p_hwfn, p_ptt); 7382 7383 return (rval); 7384 } 7385 7386 static int 7387 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7388 { 7389 int rval = -1; 7390 struct ecore_hwfn *p_hwfn; 7391 struct ecore_ptt *p_ptt; 7392 7393 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7394 7395 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7396 p_ptt = ecore_ptt_acquire(p_hwfn); 7397 7398 if (!p_ptt) { 7399 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7400 return (rval); 7401 } 7402 7403 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7404 7405 if (rval == DBG_STATUS_OK) 7406 rval = 0; 7407 else { 7408 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7409 " [0x%x]\n", rval); 7410 } 7411 7412 ecore_ptt_release(p_hwfn, p_ptt); 7413 7414 return (rval); 7415 } 7416 7417 static void 7418 qlnx_sample_storm_stats(qlnx_host_t *ha) 7419 { 7420 int i, index; 7421 struct ecore_dev *cdev; 7422 
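/* Sampling note (added comment): each invocation records one sample per
 * hwfn of the active/stall/sleeping/inactive cycle counters of the
 * X/Y/P/T/M/U storm processors, read from SEM fast memory, into
 * ha->storm_stats[]. ha->storm_stats_index advances once per call and
 * gathering stops after QLNX_STORM_STATS_SAMPLES_PER_HWFN samples;
 * storm_stats_gather itself is assumed to be armed elsewhere in the
 * driver (not shown here). */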
qlnx_storm_stats_t *s_stats; 7423 uint32_t reg; 7424 struct ecore_ptt *p_ptt; 7425 struct ecore_hwfn *hwfn; 7426 7427 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7428 ha->storm_stats_gather = 0; 7429 return; 7430 } 7431 7432 cdev = &ha->cdev; 7433 7434 for_each_hwfn(cdev, i) { 7435 hwfn = &cdev->hwfns[i]; 7436 7437 p_ptt = ecore_ptt_acquire(hwfn); 7438 if (!p_ptt) 7439 return; 7440 7441 index = ha->storm_stats_index + 7442 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7443 7444 s_stats = &ha->storm_stats[index]; 7445 7446 /* XSTORM */ 7447 reg = XSEM_REG_FAST_MEMORY + 7448 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7449 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7450 7451 reg = XSEM_REG_FAST_MEMORY + 7452 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7453 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7454 7455 reg = XSEM_REG_FAST_MEMORY + 7456 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7457 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7458 7459 reg = XSEM_REG_FAST_MEMORY + 7460 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7461 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7462 7463 /* YSTORM */ 7464 reg = YSEM_REG_FAST_MEMORY + 7465 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7466 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7467 7468 reg = YSEM_REG_FAST_MEMORY + 7469 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7470 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7471 7472 reg = YSEM_REG_FAST_MEMORY + 7473 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7474 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7475 7476 reg = YSEM_REG_FAST_MEMORY + 7477 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7478 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7479 7480 /* PSTORM */ 7481 reg = PSEM_REG_FAST_MEMORY + 7482 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7483 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7484 7485 reg = PSEM_REG_FAST_MEMORY + 7486 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7487 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7488 7489 reg = PSEM_REG_FAST_MEMORY + 7490 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7491 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7492 7493 reg = PSEM_REG_FAST_MEMORY + 7494 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7495 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7496 7497 /* TSTORM */ 7498 reg = TSEM_REG_FAST_MEMORY + 7499 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7500 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7501 7502 reg = TSEM_REG_FAST_MEMORY + 7503 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7504 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7505 7506 reg = TSEM_REG_FAST_MEMORY + 7507 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7508 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7509 7510 reg = TSEM_REG_FAST_MEMORY + 7511 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7512 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7513 7514 /* MSTORM */ 7515 reg = MSEM_REG_FAST_MEMORY + 7516 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7517 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7518 7519 reg = MSEM_REG_FAST_MEMORY + 7520 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7521 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7522 7523 reg = MSEM_REG_FAST_MEMORY + 7524 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7525 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7526 7527 reg = MSEM_REG_FAST_MEMORY + 7528 
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7529 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7530 7531 /* USTORM */ 7532 reg = USEM_REG_FAST_MEMORY + 7533 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7534 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7535 7536 reg = USEM_REG_FAST_MEMORY + 7537 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7538 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7539 7540 reg = USEM_REG_FAST_MEMORY + 7541 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7542 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7543 7544 reg = USEM_REG_FAST_MEMORY + 7545 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7546 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7547 7548 ecore_ptt_release(hwfn, p_ptt); 7549 } 7550 7551 ha->storm_stats_index++; 7552 7553 return; 7554 } 7555 7556 /* 7557 * Name: qlnx_dump_buf8 7558 * Function: dumps a buffer as bytes 7559 */ 7560 static void 7561 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7562 { 7563 device_t dev; 7564 uint32_t i = 0; 7565 uint8_t *buf; 7566 7567 dev = ha->pci_dev; 7568 buf = dbuf; 7569 7570 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7571 7572 while (len >= 16) { 7573 device_printf(dev,"0x%08x:" 7574 " %02x %02x %02x %02x %02x %02x %02x %02x" 7575 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7576 buf[0], buf[1], buf[2], buf[3], 7577 buf[4], buf[5], buf[6], buf[7], 7578 buf[8], buf[9], buf[10], buf[11], 7579 buf[12], buf[13], buf[14], buf[15]); 7580 i += 16; 7581 len -= 16; 7582 buf += 16; 7583 } 7584 switch (len) { 7585 case 1: 7586 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7587 break; 7588 case 2: 7589 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7590 break; 7591 case 3: 7592 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7593 i, buf[0], buf[1], buf[2]); 7594 break; 7595 case 4: 7596 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7597 buf[0], buf[1], buf[2], buf[3]); 7598 break; 7599 case 5: 7600 device_printf(dev,"0x%08x:" 7601 " %02x %02x %02x %02x %02x\n", i, 7602 buf[0], buf[1], buf[2], buf[3], buf[4]); 7603 break; 7604 case 6: 7605 device_printf(dev,"0x%08x:" 7606 " %02x %02x %02x %02x %02x %02x\n", i, 7607 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7608 break; 7609 case 7: 7610 device_printf(dev,"0x%08x:" 7611 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7612 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7613 break; 7614 case 8: 7615 device_printf(dev,"0x%08x:" 7616 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7617 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7618 buf[7]); 7619 break; 7620 case 9: 7621 device_printf(dev,"0x%08x:" 7622 " %02x %02x %02x %02x %02x %02x %02x %02x" 7623 " %02x\n", i, 7624 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7625 buf[7], buf[8]); 7626 break; 7627 case 10: 7628 device_printf(dev,"0x%08x:" 7629 " %02x %02x %02x %02x %02x %02x %02x %02x" 7630 " %02x %02x\n", i, 7631 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7632 buf[7], buf[8], buf[9]); 7633 break; 7634 case 11: 7635 device_printf(dev,"0x%08x:" 7636 " %02x %02x %02x %02x %02x %02x %02x %02x" 7637 " %02x %02x %02x\n", i, 7638 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7639 buf[7], buf[8], buf[9], buf[10]); 7640 break; 7641 case 12: 7642 device_printf(dev,"0x%08x:" 7643 " %02x %02x %02x %02x %02x %02x %02x %02x" 7644 " %02x %02x %02x %02x\n", i, 7645 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7646 buf[7], buf[8], buf[9], buf[10], 
buf[11]); 7647 break; 7648 case 13: 7649 device_printf(dev,"0x%08x:" 7650 " %02x %02x %02x %02x %02x %02x %02x %02x" 7651 " %02x %02x %02x %02x %02x\n", i, 7652 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7653 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7654 break; 7655 case 14: 7656 device_printf(dev,"0x%08x:" 7657 " %02x %02x %02x %02x %02x %02x %02x %02x" 7658 " %02x %02x %02x %02x %02x %02x\n", i, 7659 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7660 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7661 buf[13]); 7662 break; 7663 case 15: 7664 device_printf(dev,"0x%08x:" 7665 " %02x %02x %02x %02x %02x %02x %02x %02x" 7666 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7667 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7668 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7669 buf[13], buf[14]); 7670 break; 7671 default: 7672 break; 7673 } 7674 7675 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7676 7677 return; 7678 } 7679 7680 #ifdef CONFIG_ECORE_SRIOV 7681 7682 static void 7683 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7684 { 7685 struct ecore_public_vf_info *vf_info; 7686 7687 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7688 7689 if (!vf_info) 7690 return; 7691 7692 /* Clear the VF mac */ 7693 memset(vf_info->forced_mac, 0, ETH_ALEN); 7694 7695 vf_info->forced_vlan = 0; 7696 7697 return; 7698 } 7699 7700 void 7701 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7702 { 7703 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7704 return; 7705 } 7706 7707 static int 7708 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7709 struct ecore_filter_ucast *params) 7710 { 7711 struct ecore_public_vf_info *vf; 7712 7713 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 7714 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 7715 "VF[%d] vport not initialized\n", vfid); 7716 return ECORE_INVAL; 7717 } 7718 7719 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 7720 if (!vf) 7721 return -EINVAL; 7722 7723 /* No real decision to make; Store the configured MAC */ 7724 if (params->type == ECORE_FILTER_MAC || 7725 params->type == ECORE_FILTER_MAC_VLAN) 7726 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 7727 7728 return 0; 7729 } 7730 7731 int 7732 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 7733 { 7734 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 7735 } 7736 7737 static int 7738 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 7739 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 7740 { 7741 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 7742 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 7743 "VF[%d] vport not initialized\n", vfid); 7744 return ECORE_INVAL; 7745 } 7746 7747 /* Untrusted VFs can't even be trusted to know that fact. 7748 * Simply indicate everything is configured fine, and trace 7749 * configuration 'behind their back'. 
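 * (Added note: in this port both code paths below currently return
 * success; the ACCEPT_PARAM TLV is tested but no per-VF accept-mode
 * state is actually recorded.)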
7750 */ 7751 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) 7752 return 0; 7753 7754 return 0; 7755 7756 } 7757 int 7758 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) 7759 { 7760 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); 7761 } 7762 7763 static int 7764 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) 7765 { 7766 int i; 7767 struct ecore_dev *cdev; 7768 7769 cdev = p_hwfn->p_dev; 7770 7771 for (i = 0; i < cdev->num_hwfns; i++) { 7772 if (&cdev->hwfns[i] == p_hwfn) 7773 break; 7774 } 7775 7776 if (i >= cdev->num_hwfns) 7777 return (-1); 7778 7779 return (i); 7780 } 7781 7782 static int 7783 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) 7784 { 7785 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7786 int i; 7787 7788 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", 7789 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); 7790 7791 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7792 return (-1); 7793 7794 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7795 atomic_testandset_32(&ha->sriov_task[i].flags, 7796 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); 7797 7798 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7799 &ha->sriov_task[i].pf_task); 7800 } 7801 7802 return (ECORE_SUCCESS); 7803 } 7804 7805 int 7806 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) 7807 { 7808 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); 7809 } 7810 7811 static void 7812 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) 7813 { 7814 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7815 int i; 7816 7817 if (!ha->sriov_initialized) 7818 return; 7819 7820 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7821 ha, p_hwfn->p_dev, p_hwfn); 7822 7823 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7824 return; 7825 7826 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7827 atomic_testandset_32(&ha->sriov_task[i].flags, 7828 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); 7829 7830 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7831 &ha->sriov_task[i].pf_task); 7832 } 7833 7834 return; 7835 } 7836 7837 void 7838 qlnx_vf_flr_update(void *p_hwfn) 7839 { 7840 __qlnx_vf_flr_update(p_hwfn); 7841 7842 return; 7843 } 7844 7845 #ifndef QLNX_VF 7846 7847 static void 7848 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) 7849 { 7850 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7851 int i; 7852 7853 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7854 ha, p_hwfn->p_dev, p_hwfn); 7855 7856 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7857 return; 7858 7859 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", 7860 ha, p_hwfn->p_dev, p_hwfn, i); 7861 7862 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7863 atomic_testandset_32(&ha->sriov_task[i].flags, 7864 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); 7865 7866 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7867 &ha->sriov_task[i].pf_task); 7868 } 7869 } 7870 7871 static void 7872 qlnx_initialize_sriov(qlnx_host_t *ha) 7873 { 7874 device_t dev; 7875 nvlist_t *pf_schema, *vf_schema; 7876 int iov_error; 7877 7878 dev = ha->pci_dev; 7879 7880 pf_schema = pci_iov_schema_alloc_node(); 7881 vf_schema = pci_iov_schema_alloc_node(); 7882 7883 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 7884 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 7885 IOV_SCHEMA_HASDEFAULT, FALSE); 7886 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 7887 IOV_SCHEMA_HASDEFAULT, FALSE); 7888 pci_iov_schema_add_uint16(vf_schema, "num-queues", 7889 IOV_SCHEMA_HASDEFAULT, 1); 7890 7891 iov_error = pci_iov_attach(dev, pf_schema, 
vf_schema); 7892 7893 if (iov_error != 0) { 7894 ha->sriov_initialized = 0; 7895 } else { 7896 device_printf(dev, "SRIOV initialized\n"); 7897 ha->sriov_initialized = 1; 7898 } 7899 7900 return; 7901 } 7902 7903 static void 7904 qlnx_sriov_disable(qlnx_host_t *ha) 7905 { 7906 struct ecore_dev *cdev; 7907 int i, j; 7908 7909 cdev = &ha->cdev; 7910 7911 ecore_iov_set_vfs_to_disable(cdev, true); 7912 7913 for_each_hwfn(cdev, i) { 7914 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 7915 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 7916 7917 if (!ptt) { 7918 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 7919 return; 7920 } 7921 /* Clean WFQ db and configure equal weight for all vports */ 7922 ecore_clean_wfq_db(hwfn, ptt); 7923 7924 ecore_for_each_vf(hwfn, j) { 7925 int k = 0; 7926 7927 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 7928 continue; 7929 7930 if (ecore_iov_is_vf_started(hwfn, j)) { 7931 /* Wait until VF is disabled before releasing */ 7932 7933 for (k = 0; k < 100; k++) { 7934 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 7935 qlnx_mdelay(__func__, 10); 7936 } else 7937 break; 7938 } 7939 } 7940 7941 if (k < 100) 7942 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 7943 ptt, j); 7944 else { 7945 QL_DPRINT1(ha, 7946 "Timeout waiting for VF's FLR to end\n"); 7947 } 7948 } 7949 ecore_ptt_release(hwfn, ptt); 7950 } 7951 7952 ecore_iov_set_vfs_to_disable(cdev, false); 7953 7954 return; 7955 } 7956 7957 static void 7958 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 7959 struct ecore_iov_vf_init_params *params) 7960 { 7961 u16 base, i; 7962 7963 /* Since we have an equal resource distribution per-VF, and we assume 7964 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 7965 * sequentially from there. 7966 */ 7967 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; 7968 7969 params->rel_vf_id = vfid; 7970 7971 for (i = 0; i < params->num_queues; i++) { 7972 params->req_rx_queue[i] = base + i; 7973 params->req_tx_queue[i] = base + i; 7974 } 7975 7976 /* PF uses indices 0 for itself; Set vport/RSS afterwards */ 7977 params->vport_id = vfid + 1; 7978 params->rss_eng_id = vfid + 1; 7979 7980 return; 7981 } 7982 7983 static int 7984 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) 7985 { 7986 qlnx_host_t *ha; 7987 struct ecore_dev *cdev; 7988 struct ecore_iov_vf_init_params params; 7989 int ret, j, i; 7990 uint32_t max_vfs; 7991 7992 if ((ha = device_get_softc(dev)) == NULL) { 7993 device_printf(dev, "%s: cannot get softc\n", __func__); 7994 return (-1); 7995 } 7996 7997 if (qlnx_create_pf_taskqueues(ha) != 0) 7998 goto qlnx_iov_init_err0; 7999 8000 cdev = &ha->cdev; 8001 8002 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); 8003 8004 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", 8005 dev, num_vfs, max_vfs); 8006 8007 if (num_vfs >= max_vfs) { 8008 QL_DPRINT1(ha, "Can start at most %d VFs\n", 8009 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); 8010 goto qlnx_iov_init_err0; 8011 } 8012 8013 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, 8014 M_NOWAIT); 8015 8016 if (ha->vf_attr == NULL) 8017 goto qlnx_iov_init_err0; 8018 8019 memset(&params, 0, sizeof(params)); 8020 8021 /* Initialize HW for VF access */ 8022 for_each_hwfn(cdev, j) { 8023 struct ecore_hwfn *hwfn = &cdev->hwfns[j]; 8024 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8025 8026 /* Make sure not to use more than 16 queues per VF */ 8027 params.num_queues = min_t(int, 8028 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 8029
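/* Added note: each VF is capped at 16 L2 queues; the per-VF count is the
 * smaller of that cap and an equal share of the VF L2 queues, and
 * qlnx_sriov_enable_qid_config() then carves a contiguous
 * [base, base + num_queues) range per VF starting right after the PF's
 * ECORE_PF_L2_QUE queues. Illustrative numbers only: with 64 VF L2 queues
 * and num_vfs = 8, each VF gets 8 queues, and VF 3 uses
 * base .. base + 7 where base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + 3 * 8. */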
16); 8030 8031 if (!ptt) { 8032 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8033 goto qlnx_iov_init_err1; 8034 } 8035 8036 for (i = 0; i < num_vfs; i++) { 8037 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) 8038 continue; 8039 8040 qlnx_sriov_enable_qid_config(hwfn, i, &params); 8041 8042 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, &params); 8043 8044 if (ret) { 8045 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); 8046 ecore_ptt_release(hwfn, ptt); 8047 goto qlnx_iov_init_err1; 8048 } 8049 } 8050 8051 ecore_ptt_release(hwfn, ptt); 8052 } 8053 8054 ha->num_vfs = num_vfs; 8055 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); 8056 8057 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs); 8058 8059 return (0); 8060 8061 qlnx_iov_init_err1: 8062 qlnx_sriov_disable(ha); 8063 8064 qlnx_iov_init_err0: 8065 qlnx_destroy_pf_taskqueues(ha); 8066 ha->num_vfs = 0; 8067 8068 return (-1); 8069 } 8070 8071 static void 8072 qlnx_iov_uninit(device_t dev) 8073 { 8074 qlnx_host_t *ha; 8075 8076 if ((ha = device_get_softc(dev)) == NULL) { 8077 device_printf(dev, "%s: cannot get softc\n", __func__); 8078 return; 8079 } 8080 8081 QL_DPRINT2(ha," dev = %p enter\n", dev); 8082 8083 qlnx_sriov_disable(ha); 8084 qlnx_destroy_pf_taskqueues(ha); 8085 8086 free(ha->vf_attr, M_QLNXBUF); 8087 ha->vf_attr = NULL; 8088 8089 ha->num_vfs = 0; 8090 8091 QL_DPRINT2(ha," dev = %p exit\n", dev); 8092 return; 8093 } 8094 8095 static int 8096 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 8097 { 8098 qlnx_host_t *ha; 8099 qlnx_vf_attr_t *vf_attr; 8100 unsigned const char *mac; 8101 size_t size; 8102 struct ecore_hwfn *p_hwfn; 8103 8104 if ((ha = device_get_softc(dev)) == NULL) { 8105 device_printf(dev, "%s: cannot get softc\n", __func__); 8106 return (-1); 8107 } 8108 8109 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8110 8111 if (vfnum > (ha->num_vfs - 1)) { 8112 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8113 vfnum, (ha->num_vfs - 1)); 8114 } 8115 8116 vf_attr = &ha->vf_attr[vfnum]; 8117 8118 if (nvlist_exists_binary(params, "mac-addr")) { 8119 mac = nvlist_get_binary(params, "mac-addr", &size); 8120 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8121 device_printf(dev, 8122 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8123 __func__, vf_attr->mac_addr[0], 8124 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8125 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8126 vf_attr->mac_addr[5]); 8127 p_hwfn = &ha->cdev.hwfns[0]; 8128 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8129 vfnum); 8130 } 8131 8132 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8133 return (0); 8134 } 8135 8136 static void 8137 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8138 { 8139 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8140 struct ecore_ptt *ptt; 8141 int i; 8142 8143 ptt = ecore_ptt_acquire(p_hwfn); 8144 if (!ptt) { 8145 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8146 __qlnx_pf_vf_msg(p_hwfn, 0); 8147 return; 8148 } 8149 8150 ecore_iov_pf_get_pending_events(p_hwfn, events); 8151 8152 QL_DPRINT2(ha, "Event mask of VF events:" 8153 " 0x%" PRIx64 " 0x%" PRIx64 " 0x%" PRIx64 "\n", 8154 events[0], events[1], events[2]); 8155 8156 ecore_for_each_vf(p_hwfn, i) { 8157 /* Skip VFs with no pending messages */ 8158 if (!(events[i / 64] & (1ULL << (i % 64)))) 8159 continue; 8160 8161 QL_DPRINT2(ha, 8162 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8163 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8164 8165 /* Copy VF's message to PF's request buffer for
that VF */ 8166 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8167 continue; 8168 8169 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8170 } 8171 8172 ecore_ptt_release(p_hwfn, ptt); 8173 8174 return; 8175 } 8176 8177 static void 8178 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8179 { 8180 struct ecore_ptt *ptt; 8181 int ret; 8182 8183 ptt = ecore_ptt_acquire(p_hwfn); 8184 8185 if (!ptt) { 8186 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8187 __qlnx_vf_flr_update(p_hwfn); 8188 return; 8189 } 8190 8191 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8192 8193 if (ret) { 8194 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8195 } 8196 8197 ecore_ptt_release(p_hwfn, ptt); 8198 8199 return; 8200 } 8201 8202 static void 8203 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8204 { 8205 struct ecore_ptt *ptt; 8206 int i; 8207 8208 ptt = ecore_ptt_acquire(p_hwfn); 8209 8210 if (!ptt) { 8211 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8212 qlnx_vf_bulleting_update(p_hwfn); 8213 return; 8214 } 8215 8216 ecore_for_each_vf(p_hwfn, i) { 8217 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8218 p_hwfn, i); 8219 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8220 } 8221 8222 ecore_ptt_release(p_hwfn, ptt); 8223 8224 return; 8225 } 8226 8227 static void 8228 qlnx_pf_taskqueue(void *context, int pending) 8229 { 8230 struct ecore_hwfn *p_hwfn; 8231 qlnx_host_t *ha; 8232 int i; 8233 8234 p_hwfn = context; 8235 8236 if (p_hwfn == NULL) 8237 return; 8238 8239 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8240 8241 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8242 return; 8243 8244 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8245 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) 8246 qlnx_handle_vf_msg(ha, p_hwfn); 8247 8248 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8249 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) 8250 qlnx_handle_vf_flr_update(ha, p_hwfn); 8251 8252 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8253 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) 8254 qlnx_handle_bulletin_update(ha, p_hwfn); 8255 8256 return; 8257 } 8258 8259 static int 8260 qlnx_create_pf_taskqueues(qlnx_host_t *ha) 8261 { 8262 int i; 8263 uint8_t tq_name[32]; 8264 8265 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8266 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 8267 8268 bzero(tq_name, sizeof (tq_name)); 8269 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); 8270 8271 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); 8272 8273 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 8274 taskqueue_thread_enqueue, 8275 &ha->sriov_task[i].pf_taskqueue); 8276 8277 if (ha->sriov_task[i].pf_taskqueue == NULL) 8278 return (-1); 8279 8280 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, 8281 PI_NET, "%s", tq_name); 8282 8283 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); 8284 } 8285 8286 return (0); 8287 } 8288 8289 static void 8290 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) 8291 { 8292 int i; 8293 8294 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8295 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8296 taskqueue_drain(ha->sriov_task[i].pf_taskqueue, 8297 &ha->sriov_task[i].pf_task); 8298 taskqueue_free(ha->sriov_task[i].pf_taskqueue); 8299 ha->sriov_task[i].pf_taskqueue = NULL; 8300 } 8301 } 8302 return; 8303 } 8304 8305 static void 8306 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) 8307 { 8308 struct ecore_mcp_link_capabilities caps; 8309 struct ecore_mcp_link_params params; 8310 
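/* Added note: the PF snapshots its current MFW link params, state and
 * capabilities below and writes a (possibly speed-adjusted) copy into the
 * bulletin of every possible VF; VFs are expected to learn link changes
 * from their bulletin board rather than from the MFW directly. */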
struct ecore_mcp_link_state link; 8311 int i; 8312 8313 if (!p_hwfn->pf_iov_info) 8314 return; 8315 8316 memset(&params, 0, sizeof(struct ecore_mcp_link_params)); 8317 memset(&link, 0, sizeof(struct ecore_mcp_link_state)); 8318 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); 8319 8320 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 8321 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); 8322 memcpy(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); 8323 8324 QL_DPRINT2(ha, "called\n"); 8325 8326 /* Update bulletin of all future possible VFs with link configuration */ 8327 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 8328 /* Modify link according to the VF's configured link state */ 8329 8330 link.link_up = false; 8331 8332 if (ha->link_up) { 8333 link.link_up = true; 8334 /* Report the maximum speed supported by the device: 8335 * 100G for CMT (dual-hwfn) mode devices; otherwise keep 8336 * the link speed already reported by the MFW. 8337 */ 8338 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 8339 100000 : link.speed; 8340 } 8341 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); 8342 ecore_iov_set_link(p_hwfn, i, &params, &link, &caps); 8343 } 8344 8345 qlnx_vf_bulleting_update(p_hwfn); 8346 8347 return; 8348 } 8349 #endif /* #ifndef QLNX_VF */ 8350 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 8351