1 /* 2 * Copyright (c) 2017-2018 Cavium, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 19 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 20 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 21 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 22 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 23 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 24 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 25 * POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28 /* 29 * File: qlnx_os.c 30 * Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131. 31 */ 32 33 #include <sys/cdefs.h> 34 __FBSDID("$FreeBSD$"); 35 36 #include "qlnx_os.h" 37 #include "bcm_osal.h" 38 #include "reg_addr.h" 39 #include "ecore_gtt_reg_addr.h" 40 #include "ecore.h" 41 #include "ecore_chain.h" 42 #include "ecore_status.h" 43 #include "ecore_hw.h" 44 #include "ecore_rt_defs.h" 45 #include "ecore_init_ops.h" 46 #include "ecore_int.h" 47 #include "ecore_cxt.h" 48 #include "ecore_spq.h" 49 #include "ecore_init_fw_funcs.h" 50 #include "ecore_sp_commands.h" 51 #include "ecore_dev_api.h" 52 #include "ecore_l2_api.h" 53 #include "ecore_mcp.h" 54 #include "ecore_hw_defs.h" 55 #include "mcp_public.h" 56 #include "ecore_iro.h" 57 #include "nvm_cfg.h" 58 #include "ecore_dev_api.h" 59 #include "ecore_dbg_fw_funcs.h" 60 #include "ecore_iov_api.h" 61 #include "ecore_vf_api.h" 62 63 #include "qlnx_ioctl.h" 64 #include "qlnx_def.h" 65 #include "qlnx_ver.h" 66 67 #ifdef QLNX_ENABLE_IWARP 68 #include "qlnx_rdma.h" 69 #endif /* #ifdef QLNX_ENABLE_IWARP */ 70 71 #ifdef CONFIG_ECORE_SRIOV 72 #include <sys/nv.h> 73 #include <sys/iov_schema.h> 74 #include <dev/pci/pci_iov.h> 75 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 76 77 #include <sys/smp.h> 78 79 /* 80 * static functions 81 */ 82 /* 83 * ioctl related functions 84 */ 85 static void qlnx_add_sysctls(qlnx_host_t *ha); 86 87 /* 88 * main driver 89 */ 90 static void qlnx_release(qlnx_host_t *ha); 91 static void qlnx_fp_isr(void *arg); 92 static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha); 93 static void qlnx_init(void *arg); 94 static void qlnx_init_locked(qlnx_host_t *ha); 95 static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi); 96 static int qlnx_set_promisc(qlnx_host_t *ha); 97 static int qlnx_set_allmulti(qlnx_host_t *ha); 98 static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data); 99 static int qlnx_media_change(if_t ifp); 100 static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr); 101 static void qlnx_stop(qlnx_host_t *ha); 102 static int qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, 103 struct mbuf **m_headp); 
103 struct mbuf **m_headp);
104 static int qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha); 105 static uint32_t qlnx_get_optics(qlnx_host_t *ha, 106 struct qlnx_link_output *if_link); 107 static int qlnx_transmit(if_t ifp, struct mbuf *mp); 108 static int qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, 109 struct mbuf *mp); 110 static void qlnx_qflush(if_t ifp); 111 112 static int qlnx_alloc_parent_dma_tag(qlnx_host_t *ha); 113 static void qlnx_free_parent_dma_tag(qlnx_host_t *ha); 114 static int qlnx_alloc_tx_dma_tag(qlnx_host_t *ha); 115 static void qlnx_free_tx_dma_tag(qlnx_host_t *ha); 116 static int qlnx_alloc_rx_dma_tag(qlnx_host_t *ha); 117 static void qlnx_free_rx_dma_tag(qlnx_host_t *ha); 118 119 static int qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver); 120 static int qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size); 121 122 static int qlnx_nic_setup(struct ecore_dev *cdev, 123 struct ecore_pf_params *func_params); 124 static int qlnx_nic_start(struct ecore_dev *cdev); 125 static int qlnx_slowpath_start(qlnx_host_t *ha); 126 static int qlnx_slowpath_stop(qlnx_host_t *ha); 127 static int qlnx_init_hw(qlnx_host_t *ha); 128 static void qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 129 char ver_str[VER_SIZE]); 130 static void qlnx_unload(qlnx_host_t *ha); 131 static int qlnx_load(qlnx_host_t *ha); 132 static void qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 133 uint32_t add_mac); 134 static void qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, 135 uint32_t len); 136 static int qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq); 137 static void qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq); 138 static void qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, 139 struct qlnx_rx_queue *rxq); 140 static int qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter); 141 static int qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, 142 int hwfn_index); 143 static int qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, 144 int hwfn_index); 145 static void qlnx_timer(void *arg); 146 static int qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 147 static void qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp); 148 static void qlnx_trigger_dump(qlnx_host_t *ha); 149 static uint16_t qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 150 struct qlnx_tx_queue *txq); 151 static void qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 152 struct qlnx_tx_queue *txq); 153 static int qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 154 int lro_enable); 155 static void qlnx_fp_taskqueue(void *context, int pending); 156 static void qlnx_sample_storm_stats(qlnx_host_t *ha); 157 static int qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 158 struct qlnx_agg_info *tpa); 159 static void qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa); 160 161 static uint64_t qlnx_get_counter(if_t ifp, ift_counter cnt); 162 163 /* 164 * Hooks to the Operating Systems 165 */ 166 static int qlnx_pci_probe (device_t); 167 static int qlnx_pci_attach (device_t); 168 static int qlnx_pci_detach (device_t); 169 170 #ifndef QLNX_VF 171 172 #ifdef CONFIG_ECORE_SRIOV 173 174 static int qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params); 175 static void qlnx_iov_uninit(device_t dev); 176 static int qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params); 177 static void qlnx_initialize_sriov(qlnx_host_t *ha); 178 static void qlnx_pf_taskqueue(void *context, int pending); 
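/*
 * Note (assumed wiring, for orientation): when CONFIG_ECORE_SRIOV is defined,
 * the pci_iov_* handlers declared above are registered in qlnx_pci_methods[]
 * below via DEVMETHOD(pci_iov_init/pci_iov_uninit/pci_iov_add_vf, ...), and
 * the PCI SR-IOV framework (e.g. iovctl(8)) invokes them when virtual
 * functions are configured on this physical function.
 */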
179 static int qlnx_create_pf_taskqueues(qlnx_host_t *ha); 180 static void qlnx_destroy_pf_taskqueues(qlnx_host_t *ha); 181 static void qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha); 182 183 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 184 185 static device_method_t qlnx_pci_methods[] = { 186 /* Device interface */ 187 DEVMETHOD(device_probe, qlnx_pci_probe), 188 DEVMETHOD(device_attach, qlnx_pci_attach), 189 DEVMETHOD(device_detach, qlnx_pci_detach), 190 191 #ifdef CONFIG_ECORE_SRIOV 192 DEVMETHOD(pci_iov_init, qlnx_iov_init), 193 DEVMETHOD(pci_iov_uninit, qlnx_iov_uninit), 194 DEVMETHOD(pci_iov_add_vf, qlnx_iov_add_vf), 195 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 196 { 0, 0 } 197 }; 198 199 static driver_t qlnx_pci_driver = { 200 "ql", qlnx_pci_methods, sizeof (qlnx_host_t), 201 }; 202 203 MODULE_VERSION(if_qlnxe,1); 204 DRIVER_MODULE(if_qlnxe, pci, qlnx_pci_driver, 0, 0); 205 206 MODULE_DEPEND(if_qlnxe, pci, 1, 1, 1); 207 MODULE_DEPEND(if_qlnxe, ether, 1, 1, 1); 208 209 #else 210 211 static device_method_t qlnxv_pci_methods[] = { 212 /* Device interface */ 213 DEVMETHOD(device_probe, qlnx_pci_probe), 214 DEVMETHOD(device_attach, qlnx_pci_attach), 215 DEVMETHOD(device_detach, qlnx_pci_detach), 216 { 0, 0 } 217 }; 218 219 static driver_t qlnxv_pci_driver = { 220 "ql", qlnxv_pci_methods, sizeof (qlnx_host_t), 221 }; 222 223 MODULE_VERSION(if_qlnxev,1); 224 DRIVER_MODULE(if_qlnxev, pci, qlnxv_pci_driver, 0, 0); 225 226 MODULE_DEPEND(if_qlnxev, pci, 1, 1, 1); 227 MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1); 228 229 #endif /* #ifdef QLNX_VF */ 230 231 MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver"); 232 233 char qlnx_dev_str[128]; 234 char qlnx_ver_str[VER_SIZE]; 235 char qlnx_name_str[NAME_SIZE]; 236 237 /* 238 * Some PCI Configuration Space Related Defines 239 */ 240 241 #ifndef PCI_VENDOR_QLOGIC 242 #define PCI_VENDOR_QLOGIC 0x1077 243 #endif 244 245 /* 40G Adapter QLE45xxx*/ 246 #ifndef QLOGIC_PCI_DEVICE_ID_1634 247 #define QLOGIC_PCI_DEVICE_ID_1634 0x1634 248 #endif 249 250 /* 100G Adapter QLE45xxx*/ 251 #ifndef QLOGIC_PCI_DEVICE_ID_1644 252 #define QLOGIC_PCI_DEVICE_ID_1644 0x1644 253 #endif 254 255 /* 25G Adapter QLE45xxx*/ 256 #ifndef QLOGIC_PCI_DEVICE_ID_1656 257 #define QLOGIC_PCI_DEVICE_ID_1656 0x1656 258 #endif 259 260 /* 50G Adapter QLE45xxx*/ 261 #ifndef QLOGIC_PCI_DEVICE_ID_1654 262 #define QLOGIC_PCI_DEVICE_ID_1654 0x1654 263 #endif 264 265 /* 10G/25G/40G Adapter QLE41xxx*/ 266 #ifndef QLOGIC_PCI_DEVICE_ID_8070 267 #define QLOGIC_PCI_DEVICE_ID_8070 0x8070 268 #endif 269 270 /* SRIOV Device (All Speeds) Adapter QLE41xxx*/ 271 #ifndef QLOGIC_PCI_DEVICE_ID_8090 272 #define QLOGIC_PCI_DEVICE_ID_8090 0x8090 273 #endif 274 275 SYSCTL_NODE(_hw, OID_AUTO, qlnxe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 276 "qlnxe driver parameters"); 277 278 /* Number of Queues: 0 (Auto) or 1 to 32 (fixed queue number) */ 279 static int qlnxe_queue_count = QLNX_DEFAULT_RSS; 280 281 SYSCTL_INT(_hw_qlnxe, OID_AUTO, queue_count, CTLFLAG_RDTUN, 282 &qlnxe_queue_count, 0, "Multi-Queue queue count"); 283 284 /* 285 * Note on RDMA personality setting 286 * 287 * Read the personality configured in NVRAM 288 * If the personality is ETH_ONLY, ETH_IWARP or ETH_ROCE and 289 * the configured personality in sysctl is QLNX_PERSONALITY_DEFAULT 290 * use the personality in NVRAM. 291 292 * Otherwise use the personality configured in sysctl.
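 *
 * For example (a hypothetical setting, not a recommendation): each PCI
 * function is assigned a 4-bit field, decoded as
 *     (qlnxe_rdma_configuration >> (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC))
 *         & QLNX_PERSONALIY_MASK
 * so a loader tunable of hw.qlnxe.rdma_configuration="0x22222223" would
 * request ETH_ROCE (0x3) on function 0 and ETH_IWARP (0x2) on functions 1
 * through 7, while the default of 0x22222222 requests ETH_IWARP (0x2) on
 * functions 0 through 7.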
293 * 294 */ 295 #define QLNX_PERSONALITY_DEFAULT 0x0 /* use personality in NVRAM */ 296 #define QLNX_PERSONALITY_ETH_ONLY 0x1 /* Override with ETH_ONLY */ 297 #define QLNX_PERSONALITY_ETH_IWARP 0x2 /* Override with ETH_IWARP */ 298 #define QLNX_PERSONALITY_ETH_ROCE 0x3 /* Override with ETH_ROCE */ 299 #define QLNX_PERSONALITY_BITS_PER_FUNC 4 300 #define QLNX_PERSONALIY_MASK 0xF 301 302 /* RDMA configuration; 64bit field allows setting for 16 physical functions*/ 303 static uint64_t qlnxe_rdma_configuration = 0x22222222; 304 305 SYSCTL_U64(_hw_qlnxe, OID_AUTO, rdma_configuration, CTLFLAG_RDTUN, 306 &qlnxe_rdma_configuration, 0, "RDMA Configuration"); 307 308 int 309 qlnx_vf_device(qlnx_host_t *ha) 310 { 311 uint16_t device_id; 312 313 device_id = ha->device_id; 314 315 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 316 return 0; 317 318 return -1; 319 } 320 321 static int 322 qlnx_valid_device(qlnx_host_t *ha) 323 { 324 uint16_t device_id; 325 326 device_id = ha->device_id; 327 328 #ifndef QLNX_VF 329 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 330 (device_id == QLOGIC_PCI_DEVICE_ID_1644) || 331 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 332 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 333 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 334 return 0; 335 #else 336 if (device_id == QLOGIC_PCI_DEVICE_ID_8090) 337 return 0; 338 339 #endif /* #ifndef QLNX_VF */ 340 return -1; 341 } 342 343 #ifdef QLNX_ENABLE_IWARP 344 static int 345 qlnx_rdma_supported(struct qlnx_host *ha) 346 { 347 uint16_t device_id; 348 349 device_id = pci_get_device(ha->pci_dev); 350 351 if ((device_id == QLOGIC_PCI_DEVICE_ID_1634) || 352 (device_id == QLOGIC_PCI_DEVICE_ID_1656) || 353 (device_id == QLOGIC_PCI_DEVICE_ID_1654) || 354 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 355 return (0); 356 357 return (-1); 358 } 359 #endif /* #ifdef QLNX_ENABLE_IWARP */ 360 361 /* 362 * Name: qlnx_pci_probe 363 * Function: Validate the PCI device to be a QLA80XX device 364 */ 365 static int 366 qlnx_pci_probe(device_t dev) 367 { 368 snprintf(qlnx_ver_str, sizeof(qlnx_ver_str), "v%d.%d.%d", 369 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, QLNX_VERSION_BUILD); 370 snprintf(qlnx_name_str, sizeof(qlnx_name_str), "qlnx"); 371 372 if (pci_get_vendor(dev) != PCI_VENDOR_QLOGIC) { 373 return (ENXIO); 374 } 375 376 switch (pci_get_device(dev)) { 377 #ifndef QLNX_VF 378 379 case QLOGIC_PCI_DEVICE_ID_1644: 380 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 381 "Qlogic 100GbE PCI CNA Adapter-Ethernet Function", 382 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 383 QLNX_VERSION_BUILD); 384 device_set_desc_copy(dev, qlnx_dev_str); 385 386 break; 387 388 case QLOGIC_PCI_DEVICE_ID_1634: 389 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 390 "Qlogic 40GbE PCI CNA Adapter-Ethernet Function", 391 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 392 QLNX_VERSION_BUILD); 393 device_set_desc_copy(dev, qlnx_dev_str); 394 395 break; 396 397 case QLOGIC_PCI_DEVICE_ID_1656: 398 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 399 "Qlogic 25GbE PCI CNA Adapter-Ethernet Function", 400 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 401 QLNX_VERSION_BUILD); 402 device_set_desc_copy(dev, qlnx_dev_str); 403 404 break; 405 406 case QLOGIC_PCI_DEVICE_ID_1654: 407 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 408 "Qlogic 50GbE PCI CNA Adapter-Ethernet Function", 409 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 410 QLNX_VERSION_BUILD); 411 device_set_desc_copy(dev, qlnx_dev_str); 412 413 break; 414 415 case QLOGIC_PCI_DEVICE_ID_8070: 416 
snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 417 "Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)" 418 " Adapter-Ethernet Function", 419 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 420 QLNX_VERSION_BUILD); 421 device_set_desc_copy(dev, qlnx_dev_str); 422 423 break; 424 425 #else 426 case QLOGIC_PCI_DEVICE_ID_8090: 427 snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d", 428 "Qlogic SRIOV PCI CNA (AH) " 429 "Adapter-Ethernet Function", 430 QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR, 431 QLNX_VERSION_BUILD); 432 device_set_desc_copy(dev, qlnx_dev_str); 433 434 break; 435 436 #endif /* #ifndef QLNX_VF */ 437 438 default: 439 return (ENXIO); 440 } 441 442 #ifdef QLNX_ENABLE_IWARP 443 qlnx_rdma_init(); 444 #endif /* #ifdef QLNX_ENABLE_IWARP */ 445 446 return (BUS_PROBE_DEFAULT); 447 } 448 449 static uint16_t 450 qlnx_num_tx_compl(qlnx_host_t *ha, struct qlnx_fastpath *fp, 451 struct qlnx_tx_queue *txq) 452 { 453 u16 hw_bd_cons; 454 u16 ecore_cons_idx; 455 uint16_t diff; 456 457 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 458 459 ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl); 460 if (hw_bd_cons < ecore_cons_idx) { 461 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 462 } else { 463 diff = hw_bd_cons - ecore_cons_idx; 464 } 465 return diff; 466 } 467 468 static void 469 qlnx_sp_intr(void *arg) 470 { 471 struct ecore_hwfn *p_hwfn; 472 qlnx_host_t *ha; 473 int i; 474 475 p_hwfn = arg; 476 477 if (p_hwfn == NULL) { 478 printf("%s: spurious slowpath intr\n", __func__); 479 return; 480 } 481 482 ha = (qlnx_host_t *)p_hwfn->p_dev; 483 484 QL_DPRINT2(ha, "enter\n"); 485 486 for (i = 0; i < ha->cdev.num_hwfns; i++) { 487 if (&ha->cdev.hwfns[i] == p_hwfn) { 488 taskqueue_enqueue(ha->sp_taskqueue[i], &ha->sp_task[i]); 489 break; 490 } 491 } 492 QL_DPRINT2(ha, "exit\n"); 493 494 return; 495 } 496 497 static void 498 qlnx_sp_taskqueue(void *context, int pending) 499 { 500 struct ecore_hwfn *p_hwfn; 501 502 p_hwfn = context; 503 504 if (p_hwfn != NULL) { 505 qlnx_sp_isr(p_hwfn); 506 } 507 return; 508 } 509 510 static int 511 qlnx_create_sp_taskqueues(qlnx_host_t *ha) 512 { 513 int i; 514 uint8_t tq_name[32]; 515 516 for (i = 0; i < ha->cdev.num_hwfns; i++) { 517 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 518 519 bzero(tq_name, sizeof (tq_name)); 520 snprintf(tq_name, sizeof (tq_name), "ql_sp_tq_%d", i); 521 522 TASK_INIT(&ha->sp_task[i], 0, qlnx_sp_taskqueue, p_hwfn); 523 524 ha->sp_taskqueue[i] = taskqueue_create(tq_name, M_NOWAIT, 525 taskqueue_thread_enqueue, &ha->sp_taskqueue[i]); 526 527 if (ha->sp_taskqueue[i] == NULL) 528 return (-1); 529 530 taskqueue_start_threads(&ha->sp_taskqueue[i], 1, PI_NET, "%s", 531 tq_name); 532 533 QL_DPRINT1(ha, "%p\n", ha->sp_taskqueue[i]); 534 } 535 536 return (0); 537 } 538 539 static void 540 qlnx_destroy_sp_taskqueues(qlnx_host_t *ha) 541 { 542 int i; 543 544 for (i = 0; i < ha->cdev.num_hwfns; i++) { 545 if (ha->sp_taskqueue[i] != NULL) { 546 taskqueue_drain(ha->sp_taskqueue[i], &ha->sp_task[i]); 547 taskqueue_free(ha->sp_taskqueue[i]); 548 } 549 } 550 return; 551 } 552 553 static void 554 qlnx_fp_taskqueue(void *context, int pending) 555 { 556 struct qlnx_fastpath *fp; 557 qlnx_host_t *ha; 558 if_t ifp; 559 560 fp = context; 561 562 if (fp == NULL) 563 return; 564 565 ha = (qlnx_host_t *)fp->edev; 566 567 ifp = ha->ifp; 568 569 if(if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 570 if (!drbr_empty(ifp, fp->tx_br)) { 571 if(mtx_trylock(&fp->tx_mtx)) { 572 #ifdef QLNX_TRACE_PERF_DATA 573 tx_pkts = fp->tx_pkts_transmitted; 574 tx_compl = 
fp->tx_pkts_completed; 575 #endif 576 577 qlnx_transmit_locked(ifp, fp, NULL); 578 579 #ifdef QLNX_TRACE_PERF_DATA 580 fp->tx_pkts_trans_fp += 581 (fp->tx_pkts_transmitted - tx_pkts); 582 fp->tx_pkts_compl_fp += 583 (fp->tx_pkts_completed - tx_compl); 584 #endif 585 mtx_unlock(&fp->tx_mtx); 586 } 587 } 588 } 589 590 QL_DPRINT2(ha, "exit \n"); 591 return; 592 } 593 594 static int 595 qlnx_create_fp_taskqueues(qlnx_host_t *ha) 596 { 597 int i; 598 uint8_t tq_name[32]; 599 struct qlnx_fastpath *fp; 600 601 for (i = 0; i < ha->num_rss; i++) { 602 fp = &ha->fp_array[i]; 603 604 bzero(tq_name, sizeof (tq_name)); 605 snprintf(tq_name, sizeof (tq_name), "ql_fp_tq_%d", i); 606 607 TASK_INIT(&fp->fp_task, 0, qlnx_fp_taskqueue, fp); 608 609 fp->fp_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 610 taskqueue_thread_enqueue, 611 &fp->fp_taskqueue); 612 613 if (fp->fp_taskqueue == NULL) 614 return (-1); 615 616 taskqueue_start_threads(&fp->fp_taskqueue, 1, PI_NET, "%s", 617 tq_name); 618 619 QL_DPRINT1(ha, "%p\n",fp->fp_taskqueue); 620 } 621 622 return (0); 623 } 624 625 static void 626 qlnx_destroy_fp_taskqueues(qlnx_host_t *ha) 627 { 628 int i; 629 struct qlnx_fastpath *fp; 630 631 for (i = 0; i < ha->num_rss; i++) { 632 fp = &ha->fp_array[i]; 633 634 if (fp->fp_taskqueue != NULL) { 635 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 636 taskqueue_free(fp->fp_taskqueue); 637 fp->fp_taskqueue = NULL; 638 } 639 } 640 return; 641 } 642 643 static void 644 qlnx_drain_fp_taskqueues(qlnx_host_t *ha) 645 { 646 int i; 647 struct qlnx_fastpath *fp; 648 649 for (i = 0; i < ha->num_rss; i++) { 650 fp = &ha->fp_array[i]; 651 652 if (fp->fp_taskqueue != NULL) { 653 QLNX_UNLOCK(ha); 654 taskqueue_drain(fp->fp_taskqueue, &fp->fp_task); 655 QLNX_LOCK(ha); 656 } 657 } 658 return; 659 } 660 661 static void 662 qlnx_get_params(qlnx_host_t *ha) 663 { 664 if ((qlnxe_queue_count < 0) || (qlnxe_queue_count > QLNX_MAX_RSS)) { 665 device_printf(ha->pci_dev, "invalid queue_count value (%d)\n", 666 qlnxe_queue_count); 667 qlnxe_queue_count = 0; 668 } 669 return; 670 } 671 672 static void 673 qlnx_error_recovery_taskqueue(void *context, int pending) 674 { 675 qlnx_host_t *ha; 676 677 ha = context; 678 679 QL_DPRINT2(ha, "enter\n"); 680 681 QLNX_LOCK(ha); 682 qlnx_stop(ha); 683 QLNX_UNLOCK(ha); 684 685 #ifdef QLNX_ENABLE_IWARP 686 qlnx_rdma_dev_remove(ha); 687 #endif /* #ifdef QLNX_ENABLE_IWARP */ 688 689 qlnx_slowpath_stop(ha); 690 qlnx_slowpath_start(ha); 691 692 #ifdef QLNX_ENABLE_IWARP 693 qlnx_rdma_dev_add(ha); 694 #endif /* #ifdef QLNX_ENABLE_IWARP */ 695 696 qlnx_init(ha); 697 698 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 699 700 QL_DPRINT2(ha, "exit\n"); 701 702 return; 703 } 704 705 static int 706 qlnx_create_error_recovery_taskqueue(qlnx_host_t *ha) 707 { 708 uint8_t tq_name[32]; 709 710 bzero(tq_name, sizeof (tq_name)); 711 snprintf(tq_name, sizeof (tq_name), "ql_err_tq"); 712 713 TASK_INIT(&ha->err_task, 0, qlnx_error_recovery_taskqueue, ha); 714 715 ha->err_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 716 taskqueue_thread_enqueue, &ha->err_taskqueue); 717 718 if (ha->err_taskqueue == NULL) 719 return (-1); 720 721 taskqueue_start_threads(&ha->err_taskqueue, 1, PI_NET, "%s", tq_name); 722 723 QL_DPRINT1(ha, "%p\n",ha->err_taskqueue); 724 725 return (0); 726 } 727 728 static void 729 qlnx_destroy_error_recovery_taskqueue(qlnx_host_t *ha) 730 { 731 if (ha->err_taskqueue != NULL) { 732 taskqueue_drain(ha->err_taskqueue, &ha->err_task); 733 taskqueue_free(ha->err_taskqueue); 734 } 735 736 
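/* the taskqueue, if it was ever created, has been drained and freed above; clear the handle */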
ha->err_taskqueue = NULL; 737 738 return; 739 } 740 741 /* 742 * Name: qlnx_pci_attach 743 * Function: attaches the device to the operating system 744 */ 745 static int 746 qlnx_pci_attach(device_t dev) 747 { 748 qlnx_host_t *ha = NULL; 749 uint32_t rsrc_len_reg __unused = 0; 750 uint32_t rsrc_len_dbells = 0; 751 uint32_t rsrc_len_msix __unused = 0; 752 int i; 753 uint32_t mfw_ver; 754 uint32_t num_sp_msix = 0; 755 uint32_t num_rdma_irqs = 0; 756 757 if ((ha = device_get_softc(dev)) == NULL) { 758 device_printf(dev, "cannot get softc\n"); 759 return (ENOMEM); 760 } 761 762 memset(ha, 0, sizeof (qlnx_host_t)); 763 764 ha->device_id = pci_get_device(dev); 765 766 if (qlnx_valid_device(ha) != 0) { 767 device_printf(dev, "device is not valid device\n"); 768 return (ENXIO); 769 } 770 ha->pci_func = pci_get_function(dev); 771 772 ha->pci_dev = dev; 773 774 mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF); 775 776 ha->flags.lock_init = 1; 777 778 pci_enable_busmaster(dev); 779 780 /* 781 * map the PCI BARs 782 */ 783 784 ha->reg_rid = PCIR_BAR(0); 785 ha->pci_reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ha->reg_rid, 786 RF_ACTIVE); 787 788 if (ha->pci_reg == NULL) { 789 device_printf(dev, "unable to map BAR0\n"); 790 goto qlnx_pci_attach_err; 791 } 792 793 rsrc_len_reg = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 794 ha->reg_rid); 795 796 ha->dbells_rid = PCIR_BAR(2); 797 rsrc_len_dbells = (uint32_t) bus_get_resource_count(dev, 798 SYS_RES_MEMORY, 799 ha->dbells_rid); 800 if (rsrc_len_dbells) { 801 ha->pci_dbells = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 802 &ha->dbells_rid, RF_ACTIVE); 803 804 if (ha->pci_dbells == NULL) { 805 device_printf(dev, "unable to map BAR1\n"); 806 goto qlnx_pci_attach_err; 807 } 808 ha->dbells_phys_addr = (uint64_t) 809 bus_get_resource_start(dev, SYS_RES_MEMORY, ha->dbells_rid); 810 811 ha->dbells_size = rsrc_len_dbells; 812 } else { 813 if (qlnx_vf_device(ha) != 0) { 814 device_printf(dev, " BAR1 size is zero\n"); 815 goto qlnx_pci_attach_err; 816 } 817 } 818 819 ha->msix_rid = PCIR_BAR(4); 820 ha->msix_bar = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 821 &ha->msix_rid, RF_ACTIVE); 822 823 if (ha->msix_bar == NULL) { 824 device_printf(dev, "unable to map BAR2\n"); 825 goto qlnx_pci_attach_err; 826 } 827 828 rsrc_len_msix = (uint32_t) bus_get_resource_count(dev, SYS_RES_MEMORY, 829 ha->msix_rid); 830 831 ha->dbg_level = 0x0000; 832 833 QL_DPRINT1(ha, "\n\t\t\t" 834 "pci_dev = %p pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 835 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 836 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 837 " msix_avail = 0x%x " 838 "\n\t\t\t[ncpus = %d]\n", 839 ha->pci_dev, ha->pci_reg, rsrc_len_reg, 840 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 841 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 842 mp_ncpus); 843 /* 844 * allocate dma tags 845 */ 846 847 if (qlnx_alloc_parent_dma_tag(ha)) 848 goto qlnx_pci_attach_err; 849 850 if (qlnx_alloc_tx_dma_tag(ha)) 851 goto qlnx_pci_attach_err; 852 853 if (qlnx_alloc_rx_dma_tag(ha)) 854 goto qlnx_pci_attach_err; 855 856 857 if (qlnx_init_hw(ha) != 0) 858 goto qlnx_pci_attach_err; 859 860 ha->flags.hw_init = 1; 861 862 qlnx_get_params(ha); 863 864 if((pci_get_device(dev) == QLOGIC_PCI_DEVICE_ID_1644) && 865 (qlnxe_queue_count == QLNX_DEFAULT_RSS)) { 866 qlnxe_queue_count = QLNX_MAX_RSS; 867 } 868 869 /* 870 * Allocate MSI-x vectors 871 */ 872 if (qlnx_vf_device(ha) != 0) { 873 if (qlnxe_queue_count == 
0) 874 ha->num_rss = QLNX_DEFAULT_RSS; 875 else 876 ha->num_rss = qlnxe_queue_count; 877 878 num_sp_msix = ha->cdev.num_hwfns; 879 } else { 880 uint8_t max_rxq; 881 uint8_t max_txq; 882 883 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_rxq); 884 ecore_vf_get_num_rxqs(&ha->cdev.hwfns[0], &max_txq); 885 886 if (max_rxq < max_txq) 887 ha->num_rss = max_rxq; 888 else 889 ha->num_rss = max_txq; 890 891 if (ha->num_rss > QLNX_MAX_VF_RSS) 892 ha->num_rss = QLNX_MAX_VF_RSS; 893 894 num_sp_msix = 0; 895 } 896 897 if (ha->num_rss > mp_ncpus) 898 ha->num_rss = mp_ncpus; 899 900 ha->num_tc = QLNX_MAX_TC; 901 902 ha->msix_count = pci_msix_count(dev); 903 904 #ifdef QLNX_ENABLE_IWARP 905 906 num_rdma_irqs = qlnx_rdma_get_num_irqs(ha); 907 908 #endif /* #ifdef QLNX_ENABLE_IWARP */ 909 910 if (!ha->msix_count || 911 (ha->msix_count < (num_sp_msix + 1 + num_rdma_irqs))) { 912 device_printf(dev, "%s: msix_count[%d] not enough\n", __func__, 913 ha->msix_count); 914 goto qlnx_pci_attach_err; 915 } 916 917 if (ha->msix_count > (ha->num_rss + num_sp_msix + num_rdma_irqs)) 918 ha->msix_count = ha->num_rss + num_sp_msix + num_rdma_irqs; 919 else 920 ha->num_rss = ha->msix_count - (num_sp_msix + num_rdma_irqs); 921 922 QL_DPRINT1(ha, "\n\t\t\t" 923 "pci_reg = %p, reg_len = 0x%08x reg_rid = 0x%08x" 924 "\n\t\t\tdbells = %p, dbells_len = 0x%08x dbells_rid = 0x%08x" 925 "\n\t\t\tmsix = %p, msix_len = 0x%08x msix_rid = 0x%08x" 926 " msix_avail = 0x%x msix_alloc = 0x%x" 927 "\n\t\t\t[ncpus = %d][num_rss = 0x%x] [num_tc = 0x%x]\n", 928 ha->pci_reg, rsrc_len_reg, 929 ha->reg_rid, ha->pci_dbells, rsrc_len_dbells, ha->dbells_rid, 930 ha->msix_bar, rsrc_len_msix, ha->msix_rid, pci_msix_count(dev), 931 ha->msix_count, mp_ncpus, ha->num_rss, ha->num_tc); 932 933 if (pci_alloc_msix(dev, &ha->msix_count)) { 934 device_printf(dev, "%s: pci_alloc_msix[%d] failed\n", __func__, 935 ha->msix_count); 936 ha->msix_count = 0; 937 goto qlnx_pci_attach_err; 938 } 939 940 /* 941 * Initialize slow path interrupt and task queue 942 */ 943 944 if (num_sp_msix) { 945 if (qlnx_create_sp_taskqueues(ha) != 0) 946 goto qlnx_pci_attach_err; 947 948 for (i = 0; i < ha->cdev.num_hwfns; i++) { 949 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 950 951 ha->sp_irq_rid[i] = i + 1; 952 ha->sp_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, 953 &ha->sp_irq_rid[i], 954 (RF_ACTIVE | RF_SHAREABLE)); 955 if (ha->sp_irq[i] == NULL) { 956 device_printf(dev, 957 "could not allocate mbx interrupt\n"); 958 goto qlnx_pci_attach_err; 959 } 960 961 if (bus_setup_intr(dev, ha->sp_irq[i], 962 (INTR_TYPE_NET | INTR_MPSAFE), NULL, 963 qlnx_sp_intr, p_hwfn, &ha->sp_handle[i])) { 964 device_printf(dev, 965 "could not setup slow path interrupt\n"); 966 goto qlnx_pci_attach_err; 967 } 968 969 QL_DPRINT1(ha, "p_hwfn [%p] sp_irq_rid %d" 970 " sp_irq %p sp_handle %p\n", p_hwfn, 971 ha->sp_irq_rid[i], ha->sp_irq[i], ha->sp_handle[i]); 972 } 973 } 974 975 /* 976 * initialize fast path interrupt 977 */ 978 if (qlnx_create_fp_taskqueues(ha) != 0) 979 goto qlnx_pci_attach_err; 980 981 for (i = 0; i < ha->num_rss; i++) { 982 ha->irq_vec[i].rss_idx = i; 983 ha->irq_vec[i].ha = ha; 984 ha->irq_vec[i].irq_rid = (1 + num_sp_msix) + i; 985 986 ha->irq_vec[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 987 &ha->irq_vec[i].irq_rid, 988 (RF_ACTIVE | RF_SHAREABLE)); 989 990 if (ha->irq_vec[i].irq == NULL) { 991 device_printf(dev, 992 "could not allocate interrupt[%d] irq_rid = %d\n", 993 i, ha->irq_vec[i].irq_rid); 994 goto qlnx_pci_attach_err; 995 } 996 997 if (qlnx_alloc_tx_br(ha, 
&ha->fp_array[i])) { 998 device_printf(dev, "could not allocate tx_br[%d]\n", i); 999 goto qlnx_pci_attach_err; 1000 } 1001 } 1002 1003 if (qlnx_vf_device(ha) != 0) { 1004 callout_init(&ha->qlnx_callout, 1); 1005 ha->flags.callout_init = 1; 1006 1007 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1008 if (qlnx_grc_dumpsize(ha, &ha->grcdump_size[i], i) != 0) 1009 goto qlnx_pci_attach_err; 1010 if (ha->grcdump_size[i] == 0) 1011 goto qlnx_pci_attach_err; 1012 1013 ha->grcdump_size[i] = ha->grcdump_size[i] << 2; 1014 QL_DPRINT1(ha, "grcdump_size[%d] = 0x%08x\n", 1015 i, ha->grcdump_size[i]); 1016 1017 ha->grcdump[i] = qlnx_zalloc(ha->grcdump_size[i]); 1018 if (ha->grcdump[i] == NULL) { 1019 device_printf(dev, "grcdump alloc[%d] failed\n", i); 1020 goto qlnx_pci_attach_err; 1021 } 1022 1023 if (qlnx_idle_chk_size(ha, &ha->idle_chk_size[i], i) != 0) 1024 goto qlnx_pci_attach_err; 1025 if (ha->idle_chk_size[i] == 0) 1026 goto qlnx_pci_attach_err; 1027 1028 ha->idle_chk_size[i] = ha->idle_chk_size[i] << 2; 1029 QL_DPRINT1(ha, "idle_chk_size[%d] = 0x%08x\n", 1030 i, ha->idle_chk_size[i]); 1031 1032 ha->idle_chk[i] = qlnx_zalloc(ha->idle_chk_size[i]); 1033 1034 if (ha->idle_chk[i] == NULL) { 1035 device_printf(dev, "idle_chk alloc failed\n"); 1036 goto qlnx_pci_attach_err; 1037 } 1038 } 1039 1040 if (qlnx_create_error_recovery_taskqueue(ha) != 0) 1041 goto qlnx_pci_attach_err; 1042 } 1043 1044 if (qlnx_slowpath_start(ha) != 0) 1045 goto qlnx_pci_attach_err; 1046 else 1047 ha->flags.slowpath_start = 1; 1048 1049 if (qlnx_vf_device(ha) != 0) { 1050 if (qlnx_get_flash_size(ha, &ha->flash_size) != 0) { 1051 qlnx_mdelay(__func__, 1000); 1052 qlnx_trigger_dump(ha); 1053 1054 goto qlnx_pci_attach_err0; 1055 } 1056 1057 if (qlnx_get_mfw_version(ha, &mfw_ver) != 0) { 1058 qlnx_mdelay(__func__, 1000); 1059 qlnx_trigger_dump(ha); 1060 1061 goto qlnx_pci_attach_err0; 1062 } 1063 } else { 1064 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 1065 ecore_mcp_get_mfw_ver(p_hwfn, NULL, &mfw_ver, NULL); 1066 } 1067 1068 snprintf(ha->mfw_ver, sizeof(ha->mfw_ver), "%d.%d.%d.%d", 1069 ((mfw_ver >> 24) & 0xFF), ((mfw_ver >> 16) & 0xFF), 1070 ((mfw_ver >> 8) & 0xFF), (mfw_ver & 0xFF)); 1071 snprintf(ha->stormfw_ver, sizeof(ha->stormfw_ver), "%d.%d.%d.%d", 1072 FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION, 1073 FW_ENGINEERING_VERSION); 1074 1075 QL_DPRINT1(ha, "STORM_FW version %s MFW version %s\n", 1076 ha->stormfw_ver, ha->mfw_ver); 1077 1078 qlnx_init_ifnet(dev, ha); 1079 1080 /* 1081 * add sysctls 1082 */ 1083 qlnx_add_sysctls(ha); 1084 1085 qlnx_pci_attach_err0: 1086 /* 1087 * create ioctl device interface 1088 */ 1089 if (qlnx_vf_device(ha) != 0) { 1090 if (qlnx_make_cdev(ha)) { 1091 device_printf(dev, "%s: ql_make_cdev failed\n", __func__); 1092 goto qlnx_pci_attach_err; 1093 } 1094 1095 #ifdef QLNX_ENABLE_IWARP 1096 qlnx_rdma_dev_add(ha); 1097 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1098 } 1099 1100 #ifndef QLNX_VF 1101 #ifdef CONFIG_ECORE_SRIOV 1102 1103 if (qlnx_vf_device(ha) != 0) 1104 qlnx_initialize_sriov(ha); 1105 1106 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1107 #endif /* #ifdef QLNX_VF */ 1108 1109 QL_DPRINT2(ha, "success\n"); 1110 1111 return (0); 1112 1113 qlnx_pci_attach_err: 1114 1115 qlnx_release(ha); 1116 1117 return (ENXIO); 1118 } 1119 1120 /* 1121 * Name: qlnx_pci_detach 1122 * Function: Unhooks the device from the operating system 1123 */ 1124 static int 1125 qlnx_pci_detach(device_t dev) 1126 { 1127 qlnx_host_t *ha = NULL; 1128 1129 if ((ha = device_get_softc(dev)) == NULL) { 1130 
device_printf(dev, "%s: cannot get softc\n", __func__); 1131 return (ENOMEM); 1132 } 1133 1134 if (qlnx_vf_device(ha) != 0) { 1135 #ifdef CONFIG_ECORE_SRIOV 1136 int ret; 1137 1138 ret = pci_iov_detach(dev); 1139 if (ret) { 1140 device_printf(dev, "%s: SRIOV in use\n", __func__); 1141 return (ret); 1142 } 1143 1144 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 1145 1146 #ifdef QLNX_ENABLE_IWARP 1147 if (qlnx_rdma_dev_remove(ha) != 0) 1148 return (EBUSY); 1149 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1150 } 1151 1152 QLNX_LOCK(ha); 1153 qlnx_stop(ha); 1154 QLNX_UNLOCK(ha); 1155 1156 qlnx_release(ha); 1157 1158 return (0); 1159 } 1160 1161 #ifdef QLNX_ENABLE_IWARP 1162 1163 static uint8_t 1164 qlnx_get_personality(uint8_t pci_func) 1165 { 1166 uint8_t personality; 1167 1168 personality = (qlnxe_rdma_configuration >> 1169 (pci_func * QLNX_PERSONALITY_BITS_PER_FUNC)) & 1170 QLNX_PERSONALIY_MASK; 1171 return (personality); 1172 } 1173 1174 static void 1175 qlnx_set_personality(qlnx_host_t *ha) 1176 { 1177 uint8_t personality; 1178 1179 personality = qlnx_get_personality(ha->pci_func); 1180 1181 switch (personality) { 1182 case QLNX_PERSONALITY_DEFAULT: 1183 device_printf(ha->pci_dev, "%s: DEFAULT\n", 1184 __func__); 1185 ha->personality = ECORE_PCI_DEFAULT; 1186 break; 1187 1188 case QLNX_PERSONALITY_ETH_ONLY: 1189 device_printf(ha->pci_dev, "%s: ETH_ONLY\n", 1190 __func__); 1191 ha->personality = ECORE_PCI_ETH; 1192 break; 1193 1194 case QLNX_PERSONALITY_ETH_IWARP: 1195 device_printf(ha->pci_dev, "%s: ETH_IWARP\n", 1196 __func__); 1197 ha->personality = ECORE_PCI_ETH_IWARP; 1198 break; 1199 1200 case QLNX_PERSONALITY_ETH_ROCE: 1201 device_printf(ha->pci_dev, "%s: ETH_ROCE\n", 1202 __func__); 1203 ha->personality = ECORE_PCI_ETH_ROCE; 1204 break; 1205 } 1206 1207 return; 1208 } 1209 1210 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1211 1212 static int 1213 qlnx_init_hw(qlnx_host_t *ha) 1214 { 1215 int rval = 0; 1216 struct ecore_hw_prepare_params params; 1217 1218 ecore_init_struct(&ha->cdev); 1219 1220 /* ha->dp_module = ECORE_MSG_PROBE | 1221 ECORE_MSG_INTR | 1222 ECORE_MSG_SP | 1223 ECORE_MSG_LINK | 1224 ECORE_MSG_SPQ | 1225 ECORE_MSG_RDMA; 1226 ha->dp_level = ECORE_LEVEL_VERBOSE;*/ 1227 //ha->dp_module = ECORE_MSG_RDMA | ECORE_MSG_INTR | ECORE_MSG_LL2; 1228 ha->dp_level = ECORE_LEVEL_NOTICE; 1229 //ha->dp_level = ECORE_LEVEL_VERBOSE; 1230 1231 ecore_init_dp(&ha->cdev, ha->dp_module, ha->dp_level, ha->pci_dev); 1232 1233 ha->cdev.regview = ha->pci_reg; 1234 1235 ha->personality = ECORE_PCI_DEFAULT; 1236 1237 if (qlnx_vf_device(ha) == 0) { 1238 ha->cdev.b_is_vf = true; 1239 1240 if (ha->pci_dbells != NULL) { 1241 ha->cdev.doorbells = ha->pci_dbells; 1242 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1243 ha->cdev.db_size = ha->dbells_size; 1244 } else { 1245 ha->pci_dbells = ha->pci_reg; 1246 } 1247 } else { 1248 ha->cdev.doorbells = ha->pci_dbells; 1249 ha->cdev.db_phys_addr = ha->dbells_phys_addr; 1250 ha->cdev.db_size = ha->dbells_size; 1251 1252 #ifdef QLNX_ENABLE_IWARP 1253 1254 if (qlnx_rdma_supported(ha) == 0) 1255 qlnx_set_personality(ha); 1256 1257 #endif /* #ifdef QLNX_ENABLE_IWARP */ 1258 } 1259 QL_DPRINT2(ha, "%s: %s\n", __func__, 1260 (ha->personality == ECORE_PCI_ETH_IWARP ? 
"iwarp": "ethernet")); 1261 1262 bzero(¶ms, sizeof (struct ecore_hw_prepare_params)); 1263 1264 params.personality = ha->personality; 1265 1266 params.drv_resc_alloc = false; 1267 params.chk_reg_fifo = false; 1268 params.initiate_pf_flr = true; 1269 params.epoch = 0; 1270 1271 ecore_hw_prepare(&ha->cdev, ¶ms); 1272 1273 qlnx_set_id(&ha->cdev, qlnx_name_str, qlnx_ver_str); 1274 1275 QL_DPRINT1(ha, "ha = %p cdev = %p p_hwfn = %p\n", 1276 ha, &ha->cdev, &ha->cdev.hwfns[0]); 1277 1278 return (rval); 1279 } 1280 1281 static void 1282 qlnx_release(qlnx_host_t *ha) 1283 { 1284 device_t dev; 1285 int i; 1286 1287 dev = ha->pci_dev; 1288 1289 QL_DPRINT2(ha, "enter\n"); 1290 1291 for (i = 0; i < QLNX_MAX_HW_FUNCS; i++) { 1292 if (ha->idle_chk[i] != NULL) { 1293 free(ha->idle_chk[i], M_QLNXBUF); 1294 ha->idle_chk[i] = NULL; 1295 } 1296 1297 if (ha->grcdump[i] != NULL) { 1298 free(ha->grcdump[i], M_QLNXBUF); 1299 ha->grcdump[i] = NULL; 1300 } 1301 } 1302 1303 if (ha->flags.callout_init) 1304 callout_drain(&ha->qlnx_callout); 1305 1306 if (ha->flags.slowpath_start) { 1307 qlnx_slowpath_stop(ha); 1308 } 1309 1310 if (ha->flags.hw_init) 1311 ecore_hw_remove(&ha->cdev); 1312 1313 qlnx_del_cdev(ha); 1314 1315 if (ha->ifp != NULL) 1316 ether_ifdetach(ha->ifp); 1317 1318 qlnx_free_tx_dma_tag(ha); 1319 1320 qlnx_free_rx_dma_tag(ha); 1321 1322 qlnx_free_parent_dma_tag(ha); 1323 1324 if (qlnx_vf_device(ha) != 0) { 1325 qlnx_destroy_error_recovery_taskqueue(ha); 1326 } 1327 1328 for (i = 0; i < ha->num_rss; i++) { 1329 struct qlnx_fastpath *fp = &ha->fp_array[i]; 1330 1331 if (ha->irq_vec[i].handle) { 1332 (void)bus_teardown_intr(dev, ha->irq_vec[i].irq, 1333 ha->irq_vec[i].handle); 1334 } 1335 1336 if (ha->irq_vec[i].irq) { 1337 (void)bus_release_resource(dev, SYS_RES_IRQ, 1338 ha->irq_vec[i].irq_rid, 1339 ha->irq_vec[i].irq); 1340 } 1341 1342 qlnx_free_tx_br(ha, fp); 1343 } 1344 qlnx_destroy_fp_taskqueues(ha); 1345 1346 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1347 if (ha->sp_handle[i]) 1348 (void)bus_teardown_intr(dev, ha->sp_irq[i], 1349 ha->sp_handle[i]); 1350 1351 if (ha->sp_irq[i]) 1352 (void) bus_release_resource(dev, SYS_RES_IRQ, 1353 ha->sp_irq_rid[i], ha->sp_irq[i]); 1354 } 1355 1356 qlnx_destroy_sp_taskqueues(ha); 1357 1358 if (ha->msix_count) 1359 pci_release_msi(dev); 1360 1361 if (ha->flags.lock_init) { 1362 mtx_destroy(&ha->hw_lock); 1363 } 1364 1365 if (ha->pci_reg) 1366 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->reg_rid, 1367 ha->pci_reg); 1368 1369 if (ha->dbells_size && ha->pci_dbells) 1370 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->dbells_rid, 1371 ha->pci_dbells); 1372 1373 if (ha->msix_bar) 1374 (void) bus_release_resource(dev, SYS_RES_MEMORY, ha->msix_rid, 1375 ha->msix_bar); 1376 1377 QL_DPRINT2(ha, "exit\n"); 1378 return; 1379 } 1380 1381 static void 1382 qlnx_trigger_dump(qlnx_host_t *ha) 1383 { 1384 int i; 1385 1386 if (ha->ifp != NULL) 1387 if_setdrvflagbits(ha->ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 1388 1389 QL_DPRINT2(ha, "enter\n"); 1390 1391 if (qlnx_vf_device(ha) == 0) 1392 return; 1393 1394 ha->error_recovery = 1; 1395 1396 for (i = 0; i < ha->cdev.num_hwfns; i++) { 1397 qlnx_grc_dump(ha, &ha->grcdump_dwords[i], i); 1398 qlnx_idle_chk(ha, &ha->idle_chk_dwords[i], i); 1399 } 1400 1401 QL_DPRINT2(ha, "exit\n"); 1402 1403 return; 1404 } 1405 1406 static int 1407 qlnx_trigger_dump_sysctl(SYSCTL_HANDLER_ARGS) 1408 { 1409 int err, ret = 0; 1410 qlnx_host_t *ha; 1411 1412 err = sysctl_handle_int(oidp, &ret, 0, req); 1413 1414 if (err || !req->newptr) 1415 
return (err); 1416 1417 if (ret == 1) { 1418 ha = (qlnx_host_t *)arg1; 1419 qlnx_trigger_dump(ha); 1420 } 1421 return (err); 1422 } 1423 1424 static int 1425 qlnx_set_tx_coalesce(SYSCTL_HANDLER_ARGS) 1426 { 1427 int err, i, ret = 0, usecs = 0; 1428 qlnx_host_t *ha; 1429 struct ecore_hwfn *p_hwfn; 1430 struct qlnx_fastpath *fp; 1431 1432 err = sysctl_handle_int(oidp, &usecs, 0, req); 1433 1434 if (err || !req->newptr || !usecs || (usecs > 255)) 1435 return (err); 1436 1437 ha = (qlnx_host_t *)arg1; 1438 1439 if (qlnx_vf_device(ha) == 0) 1440 return (-1); 1441 1442 for (i = 0; i < ha->num_rss; i++) { 1443 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1444 1445 fp = &ha->fp_array[i]; 1446 1447 if (fp->txq[0]->handle != NULL) { 1448 ret = ecore_set_queue_coalesce(p_hwfn, 0, 1449 (uint16_t)usecs, fp->txq[0]->handle); 1450 } 1451 } 1452 1453 if (!ret) 1454 ha->tx_coalesce_usecs = (uint8_t)usecs; 1455 1456 return (err); 1457 } 1458 1459 static int 1460 qlnx_set_rx_coalesce(SYSCTL_HANDLER_ARGS) 1461 { 1462 int err, i, ret = 0, usecs = 0; 1463 qlnx_host_t *ha; 1464 struct ecore_hwfn *p_hwfn; 1465 struct qlnx_fastpath *fp; 1466 1467 err = sysctl_handle_int(oidp, &usecs, 0, req); 1468 1469 if (err || !req->newptr || !usecs || (usecs > 255)) 1470 return (err); 1471 1472 ha = (qlnx_host_t *)arg1; 1473 1474 if (qlnx_vf_device(ha) == 0) 1475 return (-1); 1476 1477 for (i = 0; i < ha->num_rss; i++) { 1478 p_hwfn = &ha->cdev.hwfns[(i % ha->cdev.num_hwfns)]; 1479 1480 fp = &ha->fp_array[i]; 1481 1482 if (fp->rxq->handle != NULL) { 1483 ret = ecore_set_queue_coalesce(p_hwfn, (uint16_t)usecs, 1484 0, fp->rxq->handle); 1485 } 1486 } 1487 1488 if (!ret) 1489 ha->rx_coalesce_usecs = (uint8_t)usecs; 1490 1491 return (err); 1492 } 1493 1494 static void 1495 qlnx_add_sp_stats_sysctls(qlnx_host_t *ha) 1496 { 1497 struct sysctl_ctx_list *ctx; 1498 struct sysctl_oid_list *children; 1499 struct sysctl_oid *ctx_oid; 1500 1501 ctx = device_get_sysctl_ctx(ha->pci_dev); 1502 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1503 1504 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "spstat", 1505 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "spstat"); 1506 children = SYSCTL_CHILDREN(ctx_oid); 1507 1508 SYSCTL_ADD_QUAD(ctx, children, 1509 OID_AUTO, "sp_interrupts", 1510 CTLFLAG_RD, &ha->sp_interrupts, 1511 "No. of slowpath interrupts"); 1512 1513 return; 1514 } 1515 1516 static void 1517 qlnx_add_fp_stats_sysctls(qlnx_host_t *ha) 1518 { 1519 struct sysctl_ctx_list *ctx; 1520 struct sysctl_oid_list *children; 1521 struct sysctl_oid_list *node_children; 1522 struct sysctl_oid *ctx_oid; 1523 int i, j; 1524 uint8_t name_str[16]; 1525 1526 ctx = device_get_sysctl_ctx(ha->pci_dev); 1527 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1528 1529 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fpstat", 1530 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "fpstat"); 1531 children = SYSCTL_CHILDREN(ctx_oid); 1532 1533 for (i = 0; i < ha->num_rss; i++) { 1534 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1535 snprintf(name_str, sizeof(name_str), "%d", i); 1536 1537 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name_str, 1538 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, name_str); 1539 node_children = SYSCTL_CHILDREN(ctx_oid); 1540 1541 /* Tx Related */ 1542 1543 SYSCTL_ADD_QUAD(ctx, node_children, 1544 OID_AUTO, "tx_pkts_processed", 1545 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_processed, 1546 "No. 
of packets processed for transmission"); 1547 1548 SYSCTL_ADD_QUAD(ctx, node_children, 1549 OID_AUTO, "tx_pkts_freed", 1550 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_freed, 1551 "No. of freed packets"); 1552 1553 SYSCTL_ADD_QUAD(ctx, node_children, 1554 OID_AUTO, "tx_pkts_transmitted", 1555 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_transmitted, 1556 "No. of transmitted packets"); 1557 1558 SYSCTL_ADD_QUAD(ctx, node_children, 1559 OID_AUTO, "tx_pkts_completed", 1560 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_completed, 1561 "No. of transmit completions"); 1562 1563 SYSCTL_ADD_QUAD(ctx, node_children, 1564 OID_AUTO, "tx_non_tso_pkts", 1565 CTLFLAG_RD, &ha->fp_array[i].tx_non_tso_pkts, 1566 "No. of non LSO transmited packets"); 1567 1568 #ifdef QLNX_TRACE_PERF_DATA 1569 1570 SYSCTL_ADD_QUAD(ctx, node_children, 1571 OID_AUTO, "tx_pkts_trans_ctx", 1572 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_ctx, 1573 "No. of transmitted packets in transmit context"); 1574 1575 SYSCTL_ADD_QUAD(ctx, node_children, 1576 OID_AUTO, "tx_pkts_compl_ctx", 1577 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_ctx, 1578 "No. of transmit completions in transmit context"); 1579 1580 SYSCTL_ADD_QUAD(ctx, node_children, 1581 OID_AUTO, "tx_pkts_trans_fp", 1582 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_trans_fp, 1583 "No. of transmitted packets in taskqueue"); 1584 1585 SYSCTL_ADD_QUAD(ctx, node_children, 1586 OID_AUTO, "tx_pkts_compl_fp", 1587 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_fp, 1588 "No. of transmit completions in taskqueue"); 1589 1590 SYSCTL_ADD_QUAD(ctx, node_children, 1591 OID_AUTO, "tx_pkts_compl_intr", 1592 CTLFLAG_RD, &ha->fp_array[i].tx_pkts_compl_intr, 1593 "No. of transmit completions in interrupt ctx"); 1594 #endif 1595 1596 SYSCTL_ADD_QUAD(ctx, node_children, 1597 OID_AUTO, "tx_tso_pkts", 1598 CTLFLAG_RD, &ha->fp_array[i].tx_tso_pkts, 1599 "No. 
of LSO transmited packets"); 1600 1601 SYSCTL_ADD_QUAD(ctx, node_children, 1602 OID_AUTO, "tx_lso_wnd_min_len", 1603 CTLFLAG_RD, &ha->fp_array[i].tx_lso_wnd_min_len, 1604 "tx_lso_wnd_min_len"); 1605 1606 SYSCTL_ADD_QUAD(ctx, node_children, 1607 OID_AUTO, "tx_defrag", 1608 CTLFLAG_RD, &ha->fp_array[i].tx_defrag, 1609 "tx_defrag"); 1610 1611 SYSCTL_ADD_QUAD(ctx, node_children, 1612 OID_AUTO, "tx_nsegs_gt_elem_left", 1613 CTLFLAG_RD, &ha->fp_array[i].tx_nsegs_gt_elem_left, 1614 "tx_nsegs_gt_elem_left"); 1615 1616 SYSCTL_ADD_UINT(ctx, node_children, 1617 OID_AUTO, "tx_tso_max_nsegs", 1618 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_nsegs, 1619 ha->fp_array[i].tx_tso_max_nsegs, "tx_tso_max_nsegs"); 1620 1621 SYSCTL_ADD_UINT(ctx, node_children, 1622 OID_AUTO, "tx_tso_min_nsegs", 1623 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_nsegs, 1624 ha->fp_array[i].tx_tso_min_nsegs, "tx_tso_min_nsegs"); 1625 1626 SYSCTL_ADD_UINT(ctx, node_children, 1627 OID_AUTO, "tx_tso_max_pkt_len", 1628 CTLFLAG_RD, &ha->fp_array[i].tx_tso_max_pkt_len, 1629 ha->fp_array[i].tx_tso_max_pkt_len, 1630 "tx_tso_max_pkt_len"); 1631 1632 SYSCTL_ADD_UINT(ctx, node_children, 1633 OID_AUTO, "tx_tso_min_pkt_len", 1634 CTLFLAG_RD, &ha->fp_array[i].tx_tso_min_pkt_len, 1635 ha->fp_array[i].tx_tso_min_pkt_len, 1636 "tx_tso_min_pkt_len"); 1637 1638 for (j = 0; j < QLNX_FP_MAX_SEGS; j++) { 1639 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1640 snprintf(name_str, sizeof(name_str), 1641 "tx_pkts_nseg_%02d", (j+1)); 1642 1643 SYSCTL_ADD_QUAD(ctx, node_children, 1644 OID_AUTO, name_str, CTLFLAG_RD, 1645 &ha->fp_array[i].tx_pkts[j], name_str); 1646 } 1647 1648 #ifdef QLNX_TRACE_PERF_DATA 1649 for (j = 0; j < 18; j++) { 1650 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1651 snprintf(name_str, sizeof(name_str), 1652 "tx_pkts_hist_%02d", (j+1)); 1653 1654 SYSCTL_ADD_QUAD(ctx, node_children, 1655 OID_AUTO, name_str, CTLFLAG_RD, 1656 &ha->fp_array[i].tx_pkts_hist[j], name_str); 1657 } 1658 for (j = 0; j < 5; j++) { 1659 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1660 snprintf(name_str, sizeof(name_str), 1661 "tx_comInt_%02d", (j+1)); 1662 1663 SYSCTL_ADD_QUAD(ctx, node_children, 1664 OID_AUTO, name_str, CTLFLAG_RD, 1665 &ha->fp_array[i].tx_comInt[j], name_str); 1666 } 1667 for (j = 0; j < 18; j++) { 1668 bzero(name_str, (sizeof(uint8_t) * sizeof(name_str))); 1669 snprintf(name_str, sizeof(name_str), 1670 "tx_pkts_q_%02d", (j+1)); 1671 1672 SYSCTL_ADD_QUAD(ctx, node_children, 1673 OID_AUTO, name_str, CTLFLAG_RD, 1674 &ha->fp_array[i].tx_pkts_q[j], name_str); 1675 } 1676 #endif 1677 1678 SYSCTL_ADD_QUAD(ctx, node_children, 1679 OID_AUTO, "err_tx_nsegs_gt_elem_left", 1680 CTLFLAG_RD, &ha->fp_array[i].err_tx_nsegs_gt_elem_left, 1681 "err_tx_nsegs_gt_elem_left"); 1682 1683 SYSCTL_ADD_QUAD(ctx, node_children, 1684 OID_AUTO, "err_tx_dmamap_create", 1685 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_create, 1686 "err_tx_dmamap_create"); 1687 1688 SYSCTL_ADD_QUAD(ctx, node_children, 1689 OID_AUTO, "err_tx_defrag_dmamap_load", 1690 CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag_dmamap_load, 1691 "err_tx_defrag_dmamap_load"); 1692 1693 SYSCTL_ADD_QUAD(ctx, node_children, 1694 OID_AUTO, "err_tx_non_tso_max_seg", 1695 CTLFLAG_RD, &ha->fp_array[i].err_tx_non_tso_max_seg, 1696 "err_tx_non_tso_max_seg"); 1697 1698 SYSCTL_ADD_QUAD(ctx, node_children, 1699 OID_AUTO, "err_tx_dmamap_load", 1700 CTLFLAG_RD, &ha->fp_array[i].err_tx_dmamap_load, 1701 "err_tx_dmamap_load"); 1702 1703 SYSCTL_ADD_QUAD(ctx, node_children, 1704 OID_AUTO, "err_tx_defrag", 1705 
CTLFLAG_RD, &ha->fp_array[i].err_tx_defrag, 1706 "err_tx_defrag"); 1707 1708 SYSCTL_ADD_QUAD(ctx, node_children, 1709 OID_AUTO, "err_tx_free_pkt_null", 1710 CTLFLAG_RD, &ha->fp_array[i].err_tx_free_pkt_null, 1711 "err_tx_free_pkt_null"); 1712 1713 SYSCTL_ADD_QUAD(ctx, node_children, 1714 OID_AUTO, "err_tx_cons_idx_conflict", 1715 CTLFLAG_RD, &ha->fp_array[i].err_tx_cons_idx_conflict, 1716 "err_tx_cons_idx_conflict"); 1717 1718 SYSCTL_ADD_QUAD(ctx, node_children, 1719 OID_AUTO, "lro_cnt_64", 1720 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_64, 1721 "lro_cnt_64"); 1722 1723 SYSCTL_ADD_QUAD(ctx, node_children, 1724 OID_AUTO, "lro_cnt_128", 1725 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_128, 1726 "lro_cnt_128"); 1727 1728 SYSCTL_ADD_QUAD(ctx, node_children, 1729 OID_AUTO, "lro_cnt_256", 1730 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_256, 1731 "lro_cnt_256"); 1732 1733 SYSCTL_ADD_QUAD(ctx, node_children, 1734 OID_AUTO, "lro_cnt_512", 1735 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_512, 1736 "lro_cnt_512"); 1737 1738 SYSCTL_ADD_QUAD(ctx, node_children, 1739 OID_AUTO, "lro_cnt_1024", 1740 CTLFLAG_RD, &ha->fp_array[i].lro_cnt_1024, 1741 "lro_cnt_1024"); 1742 1743 /* Rx Related */ 1744 1745 SYSCTL_ADD_QUAD(ctx, node_children, 1746 OID_AUTO, "rx_pkts", 1747 CTLFLAG_RD, &ha->fp_array[i].rx_pkts, 1748 "No. of received packets"); 1749 1750 SYSCTL_ADD_QUAD(ctx, node_children, 1751 OID_AUTO, "tpa_start", 1752 CTLFLAG_RD, &ha->fp_array[i].tpa_start, 1753 "No. of tpa_start packets"); 1754 1755 SYSCTL_ADD_QUAD(ctx, node_children, 1756 OID_AUTO, "tpa_cont", 1757 CTLFLAG_RD, &ha->fp_array[i].tpa_cont, 1758 "No. of tpa_cont packets"); 1759 1760 SYSCTL_ADD_QUAD(ctx, node_children, 1761 OID_AUTO, "tpa_end", 1762 CTLFLAG_RD, &ha->fp_array[i].tpa_end, 1763 "No. of tpa_end packets"); 1764 1765 SYSCTL_ADD_QUAD(ctx, node_children, 1766 OID_AUTO, "err_m_getcl", 1767 CTLFLAG_RD, &ha->fp_array[i].err_m_getcl, 1768 "err_m_getcl"); 1769 1770 SYSCTL_ADD_QUAD(ctx, node_children, 1771 OID_AUTO, "err_m_getjcl", 1772 CTLFLAG_RD, &ha->fp_array[i].err_m_getjcl, 1773 "err_m_getjcl"); 1774 1775 SYSCTL_ADD_QUAD(ctx, node_children, 1776 OID_AUTO, "err_rx_hw_errors", 1777 CTLFLAG_RD, &ha->fp_array[i].err_rx_hw_errors, 1778 "err_rx_hw_errors"); 1779 1780 SYSCTL_ADD_QUAD(ctx, node_children, 1781 OID_AUTO, "err_rx_alloc_errors", 1782 CTLFLAG_RD, &ha->fp_array[i].err_rx_alloc_errors, 1783 "err_rx_alloc_errors"); 1784 } 1785 1786 return; 1787 } 1788 1789 static void 1790 qlnx_add_hw_stats_sysctls(qlnx_host_t *ha) 1791 { 1792 struct sysctl_ctx_list *ctx; 1793 struct sysctl_oid_list *children; 1794 struct sysctl_oid *ctx_oid; 1795 1796 ctx = device_get_sysctl_ctx(ha->pci_dev); 1797 children = SYSCTL_CHILDREN(device_get_sysctl_tree(ha->pci_dev)); 1798 1799 ctx_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "hwstat", 1800 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "hwstat"); 1801 children = SYSCTL_CHILDREN(ctx_oid); 1802 1803 SYSCTL_ADD_QUAD(ctx, children, 1804 OID_AUTO, "no_buff_discards", 1805 CTLFLAG_RD, &ha->hw_stats.common.no_buff_discards, 1806 "No. of packets discarded due to lack of buffer"); 1807 1808 SYSCTL_ADD_QUAD(ctx, children, 1809 OID_AUTO, "packet_too_big_discard", 1810 CTLFLAG_RD, &ha->hw_stats.common.packet_too_big_discard, 1811 "No. 
of packets discarded because packet was too big"); 1812 1813 SYSCTL_ADD_QUAD(ctx, children, 1814 OID_AUTO, "ttl0_discard", 1815 CTLFLAG_RD, &ha->hw_stats.common.ttl0_discard, 1816 "ttl0_discard"); 1817 1818 SYSCTL_ADD_QUAD(ctx, children, 1819 OID_AUTO, "rx_ucast_bytes", 1820 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_bytes, 1821 "rx_ucast_bytes"); 1822 1823 SYSCTL_ADD_QUAD(ctx, children, 1824 OID_AUTO, "rx_mcast_bytes", 1825 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_bytes, 1826 "rx_mcast_bytes"); 1827 1828 SYSCTL_ADD_QUAD(ctx, children, 1829 OID_AUTO, "rx_bcast_bytes", 1830 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_bytes, 1831 "rx_bcast_bytes"); 1832 1833 SYSCTL_ADD_QUAD(ctx, children, 1834 OID_AUTO, "rx_ucast_pkts", 1835 CTLFLAG_RD, &ha->hw_stats.common.rx_ucast_pkts, 1836 "rx_ucast_pkts"); 1837 1838 SYSCTL_ADD_QUAD(ctx, children, 1839 OID_AUTO, "rx_mcast_pkts", 1840 CTLFLAG_RD, &ha->hw_stats.common.rx_mcast_pkts, 1841 "rx_mcast_pkts"); 1842 1843 SYSCTL_ADD_QUAD(ctx, children, 1844 OID_AUTO, "rx_bcast_pkts", 1845 CTLFLAG_RD, &ha->hw_stats.common.rx_bcast_pkts, 1846 "rx_bcast_pkts"); 1847 1848 SYSCTL_ADD_QUAD(ctx, children, 1849 OID_AUTO, "mftag_filter_discards", 1850 CTLFLAG_RD, &ha->hw_stats.common.mftag_filter_discards, 1851 "mftag_filter_discards"); 1852 1853 SYSCTL_ADD_QUAD(ctx, children, 1854 OID_AUTO, "mac_filter_discards", 1855 CTLFLAG_RD, &ha->hw_stats.common.mac_filter_discards, 1856 "mac_filter_discards"); 1857 1858 SYSCTL_ADD_QUAD(ctx, children, 1859 OID_AUTO, "tx_ucast_bytes", 1860 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_bytes, 1861 "tx_ucast_bytes"); 1862 1863 SYSCTL_ADD_QUAD(ctx, children, 1864 OID_AUTO, "tx_mcast_bytes", 1865 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_bytes, 1866 "tx_mcast_bytes"); 1867 1868 SYSCTL_ADD_QUAD(ctx, children, 1869 OID_AUTO, "tx_bcast_bytes", 1870 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_bytes, 1871 "tx_bcast_bytes"); 1872 1873 SYSCTL_ADD_QUAD(ctx, children, 1874 OID_AUTO, "tx_ucast_pkts", 1875 CTLFLAG_RD, &ha->hw_stats.common.tx_ucast_pkts, 1876 "tx_ucast_pkts"); 1877 1878 SYSCTL_ADD_QUAD(ctx, children, 1879 OID_AUTO, "tx_mcast_pkts", 1880 CTLFLAG_RD, &ha->hw_stats.common.tx_mcast_pkts, 1881 "tx_mcast_pkts"); 1882 1883 SYSCTL_ADD_QUAD(ctx, children, 1884 OID_AUTO, "tx_bcast_pkts", 1885 CTLFLAG_RD, &ha->hw_stats.common.tx_bcast_pkts, 1886 "tx_bcast_pkts"); 1887 1888 SYSCTL_ADD_QUAD(ctx, children, 1889 OID_AUTO, "tx_err_drop_pkts", 1890 CTLFLAG_RD, &ha->hw_stats.common.tx_err_drop_pkts, 1891 "tx_err_drop_pkts"); 1892 1893 SYSCTL_ADD_QUAD(ctx, children, 1894 OID_AUTO, "tpa_coalesced_pkts", 1895 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_pkts, 1896 "tpa_coalesced_pkts"); 1897 1898 SYSCTL_ADD_QUAD(ctx, children, 1899 OID_AUTO, "tpa_coalesced_events", 1900 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_events, 1901 "tpa_coalesced_events"); 1902 1903 SYSCTL_ADD_QUAD(ctx, children, 1904 OID_AUTO, "tpa_aborts_num", 1905 CTLFLAG_RD, &ha->hw_stats.common.tpa_aborts_num, 1906 "tpa_aborts_num"); 1907 1908 SYSCTL_ADD_QUAD(ctx, children, 1909 OID_AUTO, "tpa_not_coalesced_pkts", 1910 CTLFLAG_RD, &ha->hw_stats.common.tpa_not_coalesced_pkts, 1911 "tpa_not_coalesced_pkts"); 1912 1913 SYSCTL_ADD_QUAD(ctx, children, 1914 OID_AUTO, "tpa_coalesced_bytes", 1915 CTLFLAG_RD, &ha->hw_stats.common.tpa_coalesced_bytes, 1916 "tpa_coalesced_bytes"); 1917 1918 SYSCTL_ADD_QUAD(ctx, children, 1919 OID_AUTO, "rx_64_byte_packets", 1920 CTLFLAG_RD, &ha->hw_stats.common.rx_64_byte_packets, 1921 "rx_64_byte_packets"); 1922 1923 SYSCTL_ADD_QUAD(ctx, children, 1924 OID_AUTO, 
"rx_65_to_127_byte_packets", 1925 CTLFLAG_RD, &ha->hw_stats.common.rx_65_to_127_byte_packets, 1926 "rx_65_to_127_byte_packets"); 1927 1928 SYSCTL_ADD_QUAD(ctx, children, 1929 OID_AUTO, "rx_128_to_255_byte_packets", 1930 CTLFLAG_RD, &ha->hw_stats.common.rx_128_to_255_byte_packets, 1931 "rx_128_to_255_byte_packets"); 1932 1933 SYSCTL_ADD_QUAD(ctx, children, 1934 OID_AUTO, "rx_256_to_511_byte_packets", 1935 CTLFLAG_RD, &ha->hw_stats.common.rx_256_to_511_byte_packets, 1936 "rx_256_to_511_byte_packets"); 1937 1938 SYSCTL_ADD_QUAD(ctx, children, 1939 OID_AUTO, "rx_512_to_1023_byte_packets", 1940 CTLFLAG_RD, &ha->hw_stats.common.rx_512_to_1023_byte_packets, 1941 "rx_512_to_1023_byte_packets"); 1942 1943 SYSCTL_ADD_QUAD(ctx, children, 1944 OID_AUTO, "rx_1024_to_1518_byte_packets", 1945 CTLFLAG_RD, &ha->hw_stats.common.rx_1024_to_1518_byte_packets, 1946 "rx_1024_to_1518_byte_packets"); 1947 1948 SYSCTL_ADD_QUAD(ctx, children, 1949 OID_AUTO, "rx_1519_to_1522_byte_packets", 1950 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_1522_byte_packets, 1951 "rx_1519_to_1522_byte_packets"); 1952 1953 SYSCTL_ADD_QUAD(ctx, children, 1954 OID_AUTO, "rx_1523_to_2047_byte_packets", 1955 CTLFLAG_RD, &ha->hw_stats.bb.rx_1519_to_2047_byte_packets, 1956 "rx_1523_to_2047_byte_packets"); 1957 1958 SYSCTL_ADD_QUAD(ctx, children, 1959 OID_AUTO, "rx_2048_to_4095_byte_packets", 1960 CTLFLAG_RD, &ha->hw_stats.bb.rx_2048_to_4095_byte_packets, 1961 "rx_2048_to_4095_byte_packets"); 1962 1963 SYSCTL_ADD_QUAD(ctx, children, 1964 OID_AUTO, "rx_4096_to_9216_byte_packets", 1965 CTLFLAG_RD, &ha->hw_stats.bb.rx_4096_to_9216_byte_packets, 1966 "rx_4096_to_9216_byte_packets"); 1967 1968 SYSCTL_ADD_QUAD(ctx, children, 1969 OID_AUTO, "rx_9217_to_16383_byte_packets", 1970 CTLFLAG_RD, &ha->hw_stats.bb.rx_9217_to_16383_byte_packets, 1971 "rx_9217_to_16383_byte_packets"); 1972 1973 SYSCTL_ADD_QUAD(ctx, children, 1974 OID_AUTO, "rx_crc_errors", 1975 CTLFLAG_RD, &ha->hw_stats.common.rx_crc_errors, 1976 "rx_crc_errors"); 1977 1978 SYSCTL_ADD_QUAD(ctx, children, 1979 OID_AUTO, "rx_mac_crtl_frames", 1980 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_crtl_frames, 1981 "rx_mac_crtl_frames"); 1982 1983 SYSCTL_ADD_QUAD(ctx, children, 1984 OID_AUTO, "rx_pause_frames", 1985 CTLFLAG_RD, &ha->hw_stats.common.rx_pause_frames, 1986 "rx_pause_frames"); 1987 1988 SYSCTL_ADD_QUAD(ctx, children, 1989 OID_AUTO, "rx_pfc_frames", 1990 CTLFLAG_RD, &ha->hw_stats.common.rx_pfc_frames, 1991 "rx_pfc_frames"); 1992 1993 SYSCTL_ADD_QUAD(ctx, children, 1994 OID_AUTO, "rx_align_errors", 1995 CTLFLAG_RD, &ha->hw_stats.common.rx_align_errors, 1996 "rx_align_errors"); 1997 1998 SYSCTL_ADD_QUAD(ctx, children, 1999 OID_AUTO, "rx_carrier_errors", 2000 CTLFLAG_RD, &ha->hw_stats.common.rx_carrier_errors, 2001 "rx_carrier_errors"); 2002 2003 SYSCTL_ADD_QUAD(ctx, children, 2004 OID_AUTO, "rx_oversize_packets", 2005 CTLFLAG_RD, &ha->hw_stats.common.rx_oversize_packets, 2006 "rx_oversize_packets"); 2007 2008 SYSCTL_ADD_QUAD(ctx, children, 2009 OID_AUTO, "rx_jabbers", 2010 CTLFLAG_RD, &ha->hw_stats.common.rx_jabbers, 2011 "rx_jabbers"); 2012 2013 SYSCTL_ADD_QUAD(ctx, children, 2014 OID_AUTO, "rx_undersize_packets", 2015 CTLFLAG_RD, &ha->hw_stats.common.rx_undersize_packets, 2016 "rx_undersize_packets"); 2017 2018 SYSCTL_ADD_QUAD(ctx, children, 2019 OID_AUTO, "rx_fragments", 2020 CTLFLAG_RD, &ha->hw_stats.common.rx_fragments, 2021 "rx_fragments"); 2022 2023 SYSCTL_ADD_QUAD(ctx, children, 2024 OID_AUTO, "tx_64_byte_packets", 2025 CTLFLAG_RD, &ha->hw_stats.common.tx_64_byte_packets, 2026 
"tx_64_byte_packets"); 2027 2028 SYSCTL_ADD_QUAD(ctx, children, 2029 OID_AUTO, "tx_65_to_127_byte_packets", 2030 CTLFLAG_RD, &ha->hw_stats.common.tx_65_to_127_byte_packets, 2031 "tx_65_to_127_byte_packets"); 2032 2033 SYSCTL_ADD_QUAD(ctx, children, 2034 OID_AUTO, "tx_128_to_255_byte_packets", 2035 CTLFLAG_RD, &ha->hw_stats.common.tx_128_to_255_byte_packets, 2036 "tx_128_to_255_byte_packets"); 2037 2038 SYSCTL_ADD_QUAD(ctx, children, 2039 OID_AUTO, "tx_256_to_511_byte_packets", 2040 CTLFLAG_RD, &ha->hw_stats.common.tx_256_to_511_byte_packets, 2041 "tx_256_to_511_byte_packets"); 2042 2043 SYSCTL_ADD_QUAD(ctx, children, 2044 OID_AUTO, "tx_512_to_1023_byte_packets", 2045 CTLFLAG_RD, &ha->hw_stats.common.tx_512_to_1023_byte_packets, 2046 "tx_512_to_1023_byte_packets"); 2047 2048 SYSCTL_ADD_QUAD(ctx, children, 2049 OID_AUTO, "tx_1024_to_1518_byte_packets", 2050 CTLFLAG_RD, &ha->hw_stats.common.tx_1024_to_1518_byte_packets, 2051 "tx_1024_to_1518_byte_packets"); 2052 2053 SYSCTL_ADD_QUAD(ctx, children, 2054 OID_AUTO, "tx_1519_to_2047_byte_packets", 2055 CTLFLAG_RD, &ha->hw_stats.bb.tx_1519_to_2047_byte_packets, 2056 "tx_1519_to_2047_byte_packets"); 2057 2058 SYSCTL_ADD_QUAD(ctx, children, 2059 OID_AUTO, "tx_2048_to_4095_byte_packets", 2060 CTLFLAG_RD, &ha->hw_stats.bb.tx_2048_to_4095_byte_packets, 2061 "tx_2048_to_4095_byte_packets"); 2062 2063 SYSCTL_ADD_QUAD(ctx, children, 2064 OID_AUTO, "tx_4096_to_9216_byte_packets", 2065 CTLFLAG_RD, &ha->hw_stats.bb.tx_4096_to_9216_byte_packets, 2066 "tx_4096_to_9216_byte_packets"); 2067 2068 SYSCTL_ADD_QUAD(ctx, children, 2069 OID_AUTO, "tx_9217_to_16383_byte_packets", 2070 CTLFLAG_RD, &ha->hw_stats.bb.tx_9217_to_16383_byte_packets, 2071 "tx_9217_to_16383_byte_packets"); 2072 2073 SYSCTL_ADD_QUAD(ctx, children, 2074 OID_AUTO, "tx_pause_frames", 2075 CTLFLAG_RD, &ha->hw_stats.common.tx_pause_frames, 2076 "tx_pause_frames"); 2077 2078 SYSCTL_ADD_QUAD(ctx, children, 2079 OID_AUTO, "tx_pfc_frames", 2080 CTLFLAG_RD, &ha->hw_stats.common.tx_pfc_frames, 2081 "tx_pfc_frames"); 2082 2083 SYSCTL_ADD_QUAD(ctx, children, 2084 OID_AUTO, "tx_lpi_entry_count", 2085 CTLFLAG_RD, &ha->hw_stats.bb.tx_lpi_entry_count, 2086 "tx_lpi_entry_count"); 2087 2088 SYSCTL_ADD_QUAD(ctx, children, 2089 OID_AUTO, "tx_total_collisions", 2090 CTLFLAG_RD, &ha->hw_stats.bb.tx_total_collisions, 2091 "tx_total_collisions"); 2092 2093 SYSCTL_ADD_QUAD(ctx, children, 2094 OID_AUTO, "brb_truncates", 2095 CTLFLAG_RD, &ha->hw_stats.common.brb_truncates, 2096 "brb_truncates"); 2097 2098 SYSCTL_ADD_QUAD(ctx, children, 2099 OID_AUTO, "brb_discards", 2100 CTLFLAG_RD, &ha->hw_stats.common.brb_discards, 2101 "brb_discards"); 2102 2103 SYSCTL_ADD_QUAD(ctx, children, 2104 OID_AUTO, "rx_mac_bytes", 2105 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bytes, 2106 "rx_mac_bytes"); 2107 2108 SYSCTL_ADD_QUAD(ctx, children, 2109 OID_AUTO, "rx_mac_uc_packets", 2110 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_uc_packets, 2111 "rx_mac_uc_packets"); 2112 2113 SYSCTL_ADD_QUAD(ctx, children, 2114 OID_AUTO, "rx_mac_mc_packets", 2115 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_mc_packets, 2116 "rx_mac_mc_packets"); 2117 2118 SYSCTL_ADD_QUAD(ctx, children, 2119 OID_AUTO, "rx_mac_bc_packets", 2120 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_bc_packets, 2121 "rx_mac_bc_packets"); 2122 2123 SYSCTL_ADD_QUAD(ctx, children, 2124 OID_AUTO, "rx_mac_frames_ok", 2125 CTLFLAG_RD, &ha->hw_stats.common.rx_mac_frames_ok, 2126 "rx_mac_frames_ok"); 2127 2128 SYSCTL_ADD_QUAD(ctx, children, 2129 OID_AUTO, "tx_mac_bytes", 2130 CTLFLAG_RD, 
&ha->hw_stats.common.tx_mac_bytes, 2131 "tx_mac_bytes"); 2132 2133 SYSCTL_ADD_QUAD(ctx, children, 2134 OID_AUTO, "tx_mac_uc_packets", 2135 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_uc_packets, 2136 "tx_mac_uc_packets"); 2137 2138 SYSCTL_ADD_QUAD(ctx, children, 2139 OID_AUTO, "tx_mac_mc_packets", 2140 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_mc_packets, 2141 "tx_mac_mc_packets"); 2142 2143 SYSCTL_ADD_QUAD(ctx, children, 2144 OID_AUTO, "tx_mac_bc_packets", 2145 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_bc_packets, 2146 "tx_mac_bc_packets"); 2147 2148 SYSCTL_ADD_QUAD(ctx, children, 2149 OID_AUTO, "tx_mac_ctrl_frames", 2150 CTLFLAG_RD, &ha->hw_stats.common.tx_mac_ctrl_frames, 2151 "tx_mac_ctrl_frames"); 2152 return; 2153 } 2154 2155 static void 2156 qlnx_add_sysctls(qlnx_host_t *ha) 2157 { 2158 device_t dev = ha->pci_dev; 2159 struct sysctl_ctx_list *ctx; 2160 struct sysctl_oid_list *children; 2161 2162 ctx = device_get_sysctl_ctx(dev); 2163 children = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 2164 2165 qlnx_add_fp_stats_sysctls(ha); 2166 qlnx_add_sp_stats_sysctls(ha); 2167 2168 if (qlnx_vf_device(ha) != 0) 2169 qlnx_add_hw_stats_sysctls(ha); 2170 2171 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "Driver_Version", 2172 CTLFLAG_RD, qlnx_ver_str, 0, 2173 "Driver Version"); 2174 2175 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "STORMFW_Version", 2176 CTLFLAG_RD, ha->stormfw_ver, 0, 2177 "STORM Firmware Version"); 2178 2179 SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "MFW_Version", 2180 CTLFLAG_RD, ha->mfw_ver, 0, 2181 "Management Firmware Version"); 2182 2183 SYSCTL_ADD_UINT(ctx, children, 2184 OID_AUTO, "personality", CTLFLAG_RD, 2185 &ha->personality, ha->personality, 2186 "\tpersonality = 0 => Ethernet Only\n" 2187 "\tpersonality = 3 => Ethernet and RoCE\n" 2188 "\tpersonality = 4 => Ethernet and iWARP\n" 2189 "\tpersonality = 6 => Default in Shared Memory\n"); 2190 2191 ha->dbg_level = 0; 2192 SYSCTL_ADD_UINT(ctx, children, 2193 OID_AUTO, "debug", CTLFLAG_RW, 2194 &ha->dbg_level, ha->dbg_level, "Debug Level"); 2195 2196 ha->dp_level = 0x01; 2197 SYSCTL_ADD_UINT(ctx, children, 2198 OID_AUTO, "dp_level", CTLFLAG_RW, 2199 &ha->dp_level, ha->dp_level, "DP Level"); 2200 2201 ha->dbg_trace_lro_cnt = 0; 2202 SYSCTL_ADD_UINT(ctx, children, 2203 OID_AUTO, "dbg_trace_lro_cnt", CTLFLAG_RW, 2204 &ha->dbg_trace_lro_cnt, ha->dbg_trace_lro_cnt, 2205 "Trace LRO Counts"); 2206 2207 ha->dbg_trace_tso_pkt_len = 0; 2208 SYSCTL_ADD_UINT(ctx, children, 2209 OID_AUTO, "dbg_trace_tso_pkt_len", CTLFLAG_RW, 2210 &ha->dbg_trace_tso_pkt_len, ha->dbg_trace_tso_pkt_len, 2211 "Trace TSO packet lengths"); 2212 2213 ha->dp_module = 0; 2214 SYSCTL_ADD_UINT(ctx, children, 2215 OID_AUTO, "dp_module", CTLFLAG_RW, 2216 &ha->dp_module, ha->dp_module, "DP Module"); 2217 2218 ha->err_inject = 0; 2219 2220 SYSCTL_ADD_UINT(ctx, children, 2221 OID_AUTO, "err_inject", CTLFLAG_RW, 2222 &ha->err_inject, ha->err_inject, "Error Inject"); 2223 2224 ha->storm_stats_enable = 0; 2225 2226 SYSCTL_ADD_UINT(ctx, children, 2227 OID_AUTO, "storm_stats_enable", CTLFLAG_RW, 2228 &ha->storm_stats_enable, ha->storm_stats_enable, 2229 "Enable Storm Statistics Gathering"); 2230 2231 ha->storm_stats_index = 0; 2232 2233 SYSCTL_ADD_UINT(ctx, children, 2234 OID_AUTO, "storm_stats_index", CTLFLAG_RD, 2235 &ha->storm_stats_index, ha->storm_stats_index, 2236 "Enable Storm Statistics Gathering Current Index"); 2237 2238 ha->grcdump_taken = 0; 2239 SYSCTL_ADD_UINT(ctx, children, 2240 OID_AUTO, "grcdump_taken", CTLFLAG_RD, 2241 &ha->grcdump_taken, ha->grcdump_taken, 
2242 "grcdump_taken"); 2243 2244 ha->idle_chk_taken = 0; 2245 SYSCTL_ADD_UINT(ctx, children, 2246 OID_AUTO, "idle_chk_taken", CTLFLAG_RD, 2247 &ha->idle_chk_taken, ha->idle_chk_taken, 2248 "idle_chk_taken"); 2249 2250 SYSCTL_ADD_UINT(ctx, children, 2251 OID_AUTO, "rx_coalesce_usecs", CTLFLAG_RD, 2252 &ha->rx_coalesce_usecs, ha->rx_coalesce_usecs, 2253 "rx_coalesce_usecs"); 2254 2255 SYSCTL_ADD_UINT(ctx, children, 2256 OID_AUTO, "tx_coalesce_usecs", CTLFLAG_RD, 2257 &ha->tx_coalesce_usecs, ha->tx_coalesce_usecs, 2258 "tx_coalesce_usecs"); 2259 2260 SYSCTL_ADD_PROC(ctx, children, 2261 OID_AUTO, "trigger_dump", 2262 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2263 (void *)ha, 0, qlnx_trigger_dump_sysctl, "I", "trigger_dump"); 2264 2265 SYSCTL_ADD_PROC(ctx, children, 2266 OID_AUTO, "set_rx_coalesce_usecs", 2267 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2268 (void *)ha, 0, qlnx_set_rx_coalesce, "I", 2269 "rx interrupt coalesce period microseconds"); 2270 2271 SYSCTL_ADD_PROC(ctx, children, 2272 OID_AUTO, "set_tx_coalesce_usecs", 2273 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 2274 (void *)ha, 0, qlnx_set_tx_coalesce, "I", 2275 "tx interrupt coalesce period microseconds"); 2276 2277 ha->rx_pkt_threshold = 128; 2278 SYSCTL_ADD_UINT(ctx, children, 2279 OID_AUTO, "rx_pkt_threshold", CTLFLAG_RW, 2280 &ha->rx_pkt_threshold, ha->rx_pkt_threshold, 2281 "No. of Rx Pkts to process at a time"); 2282 2283 ha->rx_jumbo_buf_eq_mtu = 0; 2284 SYSCTL_ADD_UINT(ctx, children, 2285 OID_AUTO, "rx_jumbo_buf_eq_mtu", CTLFLAG_RW, 2286 &ha->rx_jumbo_buf_eq_mtu, ha->rx_jumbo_buf_eq_mtu, 2287 "== 0 => Rx Jumbo buffers are capped to 4Kbytes\n" 2288 "otherwise Rx Jumbo buffers are set to >= MTU size\n"); 2289 2290 SYSCTL_ADD_QUAD(ctx, children, 2291 OID_AUTO, "err_illegal_intr", CTLFLAG_RD, 2292 &ha->err_illegal_intr, "err_illegal_intr"); 2293 2294 SYSCTL_ADD_QUAD(ctx, children, 2295 OID_AUTO, "err_fp_null", CTLFLAG_RD, 2296 &ha->err_fp_null, "err_fp_null"); 2297 2298 SYSCTL_ADD_QUAD(ctx, children, 2299 OID_AUTO, "err_get_proto_invalid_type", CTLFLAG_RD, 2300 &ha->err_get_proto_invalid_type, "err_get_proto_invalid_type"); 2301 return; 2302 } 2303 2304 /***************************************************************************** 2305 * Operating System Network Interface Functions 2306 *****************************************************************************/ 2307 2308 static void 2309 qlnx_init_ifnet(device_t dev, qlnx_host_t *ha) 2310 { 2311 uint16_t device_id; 2312 if_t ifp; 2313 2314 ifp = ha->ifp = if_alloc(IFT_ETHER); 2315 2316 if (ifp == NULL) 2317 panic("%s: cannot if_alloc()\n", device_get_nameunit(dev)); 2318 2319 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 2320 2321 device_id = pci_get_device(ha->pci_dev); 2322 2323 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) 2324 if_setbaudrate(ifp, IF_Gbps(40)); 2325 else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2326 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) 2327 if_setbaudrate(ifp, IF_Gbps(25)); 2328 else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) 2329 if_setbaudrate(ifp, IF_Gbps(50)); 2330 else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) 2331 if_setbaudrate(ifp, IF_Gbps(100)); 2332 2333 if_setcapabilities(ifp, IFCAP_LINKSTATE); 2334 2335 if_setinitfn(ifp, qlnx_init); 2336 if_setsoftc(ifp, ha); 2337 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 2338 if_setioctlfn(ifp, qlnx_ioctl); 2339 if_settransmitfn(ifp, qlnx_transmit); 2340 if_setqflushfn(ifp, qlnx_qflush); 2341 2342 if_setsendqlen(ifp, qlnx_get_ifq_snd_maxlen(ha)); 
2343 if_setsendqready(ifp); 2344 2345 if_setgetcounterfn(ifp, qlnx_get_counter); 2346 2347 ha->max_frame_size = if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 2348 2349 memcpy(ha->primary_mac, qlnx_get_mac_addr(ha), ETH_ALEN); 2350 2351 if (!ha->primary_mac[0] && !ha->primary_mac[1] && 2352 !ha->primary_mac[2] && !ha->primary_mac[3] && 2353 !ha->primary_mac[4] && !ha->primary_mac[5]) { 2354 uint32_t rnd; 2355 2356 rnd = arc4random(); 2357 2358 ha->primary_mac[0] = 0x00; 2359 ha->primary_mac[1] = 0x0e; 2360 ha->primary_mac[2] = 0x1e; 2361 ha->primary_mac[3] = rnd & 0xFF; 2362 ha->primary_mac[4] = (rnd >> 8) & 0xFF; 2363 ha->primary_mac[5] = (rnd >> 16) & 0xFF; 2364 } 2365 2366 ether_ifattach(ifp, ha->primary_mac); 2367 bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN); 2368 2369 if_setcapabilities(ifp, IFCAP_HWCSUM); 2370 if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0); 2371 2372 if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0); 2373 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0); 2374 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0); 2375 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0); 2376 if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTSO, 0); 2377 if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0); 2378 if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0); 2379 if_setcapabilitiesbit(ifp, IFCAP_LRO, 0); 2380 2381 if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE - 2382 (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN)); 2383 if_sethwtsomaxsegcount(ifp, QLNX_MAX_SEGMENTS - 1); /* hdr */ 2384 if_sethwtsomaxsegsize(ifp, QLNX_MAX_TX_MBUF_SIZE); 2385 2386 if_setcapenable(ifp, if_getcapabilities(ifp)); 2387 2388 if_sethwassist(ifp, CSUM_IP); 2389 if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP, 0); 2390 if_sethwassistbits(ifp, CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0); 2391 if_sethwassistbits(ifp, CSUM_TSO, 0); 2392 2393 if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); 2394 2395 ifmedia_init(&ha->media, IFM_IMASK, qlnx_media_change,\ 2396 qlnx_media_status); 2397 2398 if (device_id == QLOGIC_PCI_DEVICE_ID_1634) { 2399 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_LR4), 0, NULL); 2400 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_SR4), 0, NULL); 2401 ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL); 2402 } else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) || 2403 (device_id == QLOGIC_PCI_DEVICE_ID_8070)) { 2404 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL); 2405 ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL); 2406 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) { 2407 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL); 2408 ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL); 2409 } else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) { 2410 ifmedia_add(&ha->media, 2411 (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL); 2412 ifmedia_add(&ha->media, 2413 (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL); 2414 ifmedia_add(&ha->media, 2415 (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL); 2416 } 2417 2418 ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL); 2419 ifmedia_add(&ha->media, (IFM_ETHER | IFM_AUTO), 0, NULL); 2420 2421 ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO)); 2422 2423 QL_DPRINT2(ha, "exit\n"); 2424 2425 return; 2426 } 2427 2428 static void 2429 qlnx_init_locked(qlnx_host_t *ha) 2430 { 2431 if_t ifp = ha->ifp; 2432 2433 QL_DPRINT1(ha, "Driver Initialization start \n"); 2434 2435 qlnx_stop(ha); 2436 2437 if (qlnx_load(ha) == 0) { 2438 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 2439 if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); 2440 2441 #ifdef QLNX_ENABLE_IWARP 2442 if 
(qlnx_vf_device(ha) != 0) { 2443 qlnx_rdma_dev_open(ha); 2444 } 2445 #endif /* #ifdef QLNX_ENABLE_IWARP */ 2446 } 2447 2448 return; 2449 } 2450 2451 static void 2452 qlnx_init(void *arg) 2453 { 2454 qlnx_host_t *ha; 2455 2456 ha = (qlnx_host_t *)arg; 2457 2458 QL_DPRINT2(ha, "enter\n"); 2459 2460 QLNX_LOCK(ha); 2461 qlnx_init_locked(ha); 2462 QLNX_UNLOCK(ha); 2463 2464 QL_DPRINT2(ha, "exit\n"); 2465 2466 return; 2467 } 2468 2469 static int 2470 qlnx_config_mcast_mac_addr(qlnx_host_t *ha, uint8_t *mac_addr, uint32_t add_mac) 2471 { 2472 struct ecore_filter_mcast *mcast; 2473 struct ecore_dev *cdev; 2474 int rc; 2475 2476 cdev = &ha->cdev; 2477 2478 mcast = &ha->ecore_mcast; 2479 bzero(mcast, sizeof(struct ecore_filter_mcast)); 2480 2481 if (add_mac) 2482 mcast->opcode = ECORE_FILTER_ADD; 2483 else 2484 mcast->opcode = ECORE_FILTER_REMOVE; 2485 2486 mcast->num_mc_addrs = 1; 2487 memcpy(mcast->mac, mac_addr, ETH_ALEN); 2488 2489 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 2490 2491 return (rc); 2492 } 2493 2494 static int 2495 qlnx_hw_add_mcast(qlnx_host_t *ha, uint8_t *mta) 2496 { 2497 int i; 2498 2499 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2500 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) 2501 return 0; /* its been already added */ 2502 } 2503 2504 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2505 if ((ha->mcast[i].addr[0] == 0) && 2506 (ha->mcast[i].addr[1] == 0) && 2507 (ha->mcast[i].addr[2] == 0) && 2508 (ha->mcast[i].addr[3] == 0) && 2509 (ha->mcast[i].addr[4] == 0) && 2510 (ha->mcast[i].addr[5] == 0)) { 2511 if (qlnx_config_mcast_mac_addr(ha, mta, 1)) 2512 return (-1); 2513 2514 bcopy(mta, ha->mcast[i].addr, ETH_ALEN); 2515 ha->nmcast++; 2516 2517 return 0; 2518 } 2519 } 2520 return 0; 2521 } 2522 2523 static int 2524 qlnx_hw_del_mcast(qlnx_host_t *ha, uint8_t *mta) 2525 { 2526 int i; 2527 2528 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 2529 if (QL_MAC_CMP(ha->mcast[i].addr, mta) == 0) { 2530 if (qlnx_config_mcast_mac_addr(ha, mta, 0)) 2531 return (-1); 2532 2533 ha->mcast[i].addr[0] = 0; 2534 ha->mcast[i].addr[1] = 0; 2535 ha->mcast[i].addr[2] = 0; 2536 ha->mcast[i].addr[3] = 0; 2537 ha->mcast[i].addr[4] = 0; 2538 ha->mcast[i].addr[5] = 0; 2539 2540 ha->nmcast--; 2541 2542 return 0; 2543 } 2544 } 2545 return 0; 2546 } 2547 2548 /* 2549 * Name: qls_hw_set_multi 2550 * Function: Sets the Multicast Addresses provided the host O.S into the 2551 * hardware (for the given interface) 2552 */ 2553 static void 2554 qlnx_hw_set_multi(qlnx_host_t *ha, uint8_t *mta, uint32_t mcnt, 2555 uint32_t add_mac) 2556 { 2557 int i; 2558 2559 for (i = 0; i < mcnt; i++) { 2560 if (add_mac) { 2561 if (qlnx_hw_add_mcast(ha, mta)) 2562 break; 2563 } else { 2564 if (qlnx_hw_del_mcast(ha, mta)) 2565 break; 2566 } 2567 2568 mta += ETHER_HDR_LEN; 2569 } 2570 return; 2571 } 2572 2573 static u_int 2574 qlnx_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int mcnt) 2575 { 2576 uint8_t *mta = arg; 2577 2578 if (mcnt == QLNX_MAX_NUM_MULTICAST_ADDRS) 2579 return (0); 2580 2581 bcopy(LLADDR(sdl), &mta[mcnt * ETHER_HDR_LEN], ETHER_HDR_LEN); 2582 2583 return (1); 2584 } 2585 2586 static int 2587 qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi) 2588 { 2589 uint8_t mta[QLNX_MAX_NUM_MULTICAST_ADDRS * ETHER_HDR_LEN]; 2590 if_t ifp = ha->ifp; 2591 u_int mcnt; 2592 2593 if (qlnx_vf_device(ha) == 0) 2594 return (0); 2595 2596 mcnt = if_foreach_llmaddr(ifp, qlnx_copy_maddr, mta); 2597 2598 QLNX_LOCK(ha); 2599 qlnx_hw_set_multi(ha, mta, mcnt, add_multi); 2600 
QLNX_UNLOCK(ha); 2601 2602 return (0); 2603 } 2604 2605 static int 2606 qlnx_set_promisc(qlnx_host_t *ha) 2607 { 2608 int rc = 0; 2609 uint8_t filter; 2610 2611 if (qlnx_vf_device(ha) == 0) 2612 return (0); 2613 2614 filter = ha->filter; 2615 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2616 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 2617 2618 rc = qlnx_set_rx_accept_filter(ha, filter); 2619 return (rc); 2620 } 2621 2622 static int 2623 qlnx_set_allmulti(qlnx_host_t *ha) 2624 { 2625 int rc = 0; 2626 uint8_t filter; 2627 2628 if (qlnx_vf_device(ha) == 0) 2629 return (0); 2630 2631 filter = ha->filter; 2632 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 2633 rc = qlnx_set_rx_accept_filter(ha, filter); 2634 2635 return (rc); 2636 } 2637 2638 static int 2639 qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data) 2640 { 2641 int ret = 0, mask; 2642 struct ifreq *ifr = (struct ifreq *)data; 2643 struct ifaddr *ifa = (struct ifaddr *)data; 2644 qlnx_host_t *ha; 2645 2646 ha = (qlnx_host_t *)if_getsoftc(ifp); 2647 2648 switch (cmd) { 2649 case SIOCSIFADDR: 2650 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx)\n", cmd); 2651 2652 if (ifa->ifa_addr->sa_family == AF_INET) { 2653 if_setflagbits(ifp, IFF_UP, 0); 2654 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) { 2655 QLNX_LOCK(ha); 2656 qlnx_init_locked(ha); 2657 QLNX_UNLOCK(ha); 2658 } 2659 QL_DPRINT4(ha, "SIOCSIFADDR (0x%lx) ipv4 [0x%08x]\n", 2660 cmd, ntohl(IA_SIN(ifa)->sin_addr.s_addr)); 2661 2662 arp_ifinit(ifp, ifa); 2663 } else { 2664 ether_ioctl(ifp, cmd, data); 2665 } 2666 break; 2667 2668 case SIOCSIFMTU: 2669 QL_DPRINT4(ha, "SIOCSIFMTU (0x%lx)\n", cmd); 2670 2671 if (ifr->ifr_mtu > QLNX_MAX_MTU) { 2672 ret = EINVAL; 2673 } else { 2674 QLNX_LOCK(ha); 2675 if_setmtu(ifp, ifr->ifr_mtu); 2676 ha->max_frame_size = 2677 if_getmtu(ifp) + ETHER_HDR_LEN + ETHER_CRC_LEN; 2678 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2679 qlnx_init_locked(ha); 2680 } 2681 2682 QLNX_UNLOCK(ha); 2683 } 2684 2685 break; 2686 2687 case SIOCSIFFLAGS: 2688 QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd); 2689 2690 QLNX_LOCK(ha); 2691 2692 if (if_getflags(ifp) & IFF_UP) { 2693 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2694 if ((if_getflags(ifp) ^ ha->if_flags) & 2695 IFF_PROMISC) { 2696 ret = qlnx_set_promisc(ha); 2697 } else if ((if_getflags(ifp) ^ ha->if_flags) & 2698 IFF_ALLMULTI) { 2699 ret = qlnx_set_allmulti(ha); 2700 } 2701 } else { 2702 ha->max_frame_size = if_getmtu(ifp) + 2703 ETHER_HDR_LEN + ETHER_CRC_LEN; 2704 qlnx_init_locked(ha); 2705 } 2706 } else { 2707 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2708 qlnx_stop(ha); 2709 ha->if_flags = if_getflags(ifp); 2710 } 2711 2712 QLNX_UNLOCK(ha); 2713 break; 2714 2715 case SIOCADDMULTI: 2716 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCADDMULTI", cmd); 2717 2718 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2719 if (qlnx_set_multi(ha, 1)) 2720 ret = EINVAL; 2721 } 2722 break; 2723 2724 case SIOCDELMULTI: 2725 QL_DPRINT4(ha, "%s (0x%lx)\n", "SIOCDELMULTI", cmd); 2726 2727 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 2728 if (qlnx_set_multi(ha, 0)) 2729 ret = EINVAL; 2730 } 2731 break; 2732 2733 case SIOCSIFMEDIA: 2734 case SIOCGIFMEDIA: 2735 QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd); 2736 2737 ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd); 2738 break; 2739 2740 case SIOCSIFCAP: 2741 2742 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 2743 2744 QL_DPRINT4(ha, "SIOCSIFCAP (0x%lx)\n", cmd); 2745 2746 if (mask & IFCAP_HWCSUM) 2747 if_togglecapenable(ifp, IFCAP_HWCSUM); 2748 if (mask & IFCAP_TSO4) 2749 if_togglecapenable(ifp, IFCAP_TSO4); 2750 if 
(mask & IFCAP_TSO6) 2751 if_togglecapenable(ifp, IFCAP_TSO6); 2752 if (mask & IFCAP_VLAN_HWTAGGING) 2753 if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING); 2754 if (mask & IFCAP_VLAN_HWTSO) 2755 if_togglecapenable(ifp, IFCAP_VLAN_HWTSO); 2756 if (mask & IFCAP_LRO) 2757 if_togglecapenable(ifp, IFCAP_LRO); 2758 2759 QLNX_LOCK(ha); 2760 2761 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) 2762 qlnx_init_locked(ha); 2763 2764 QLNX_UNLOCK(ha); 2765 2766 VLAN_CAPABILITIES(ifp); 2767 break; 2768 2769 case SIOCGI2C: 2770 { 2771 struct ifi2creq i2c; 2772 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[0]; 2773 struct ecore_ptt *p_ptt; 2774 2775 ret = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 2776 2777 if (ret) 2778 break; 2779 2780 if ((i2c.len > sizeof (i2c.data)) || 2781 (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2)) { 2782 ret = EINVAL; 2783 break; 2784 } 2785 2786 p_ptt = ecore_ptt_acquire(p_hwfn); 2787 2788 if (!p_ptt) { 2789 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 2790 ret = -1; 2791 break; 2792 } 2793 2794 ret = ecore_mcp_phy_sfp_read(p_hwfn, p_ptt, 2795 (ha->pci_func & 0x1), i2c.dev_addr, i2c.offset, 2796 i2c.len, &i2c.data[0]); 2797 2798 ecore_ptt_release(p_hwfn, p_ptt); 2799 2800 if (ret) { 2801 ret = -1; 2802 break; 2803 } 2804 2805 ret = copyout(&i2c, ifr_data_get_ptr(ifr), sizeof(i2c)); 2806 2807 QL_DPRINT8(ha, "SIOCGI2C copyout ret = %d \ 2808 len = %d addr = 0x%02x offset = 0x%04x \ 2809 data[0..7]=0x%02x 0x%02x 0x%02x 0x%02x 0x%02x \ 2810 0x%02x 0x%02x 0x%02x\n", 2811 ret, i2c.len, i2c.dev_addr, i2c.offset, 2812 i2c.data[0], i2c.data[1], i2c.data[2], i2c.data[3], 2813 i2c.data[4], i2c.data[5], i2c.data[6], i2c.data[7]); 2814 break; 2815 } 2816 2817 default: 2818 QL_DPRINT4(ha, "default (0x%lx)\n", cmd); 2819 ret = ether_ioctl(ifp, cmd, data); 2820 break; 2821 } 2822 2823 return (ret); 2824 } 2825 2826 static int 2827 qlnx_media_change(if_t ifp) 2828 { 2829 qlnx_host_t *ha; 2830 struct ifmedia *ifm; 2831 int ret = 0; 2832 2833 ha = (qlnx_host_t *)if_getsoftc(ifp); 2834 2835 QL_DPRINT2(ha, "enter\n"); 2836 2837 ifm = &ha->media; 2838 2839 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2840 ret = EINVAL; 2841 2842 QL_DPRINT2(ha, "exit\n"); 2843 2844 return (ret); 2845 } 2846 2847 static void 2848 qlnx_media_status(if_t ifp, struct ifmediareq *ifmr) 2849 { 2850 qlnx_host_t *ha; 2851 2852 ha = (qlnx_host_t *)if_getsoftc(ifp); 2853 2854 QL_DPRINT2(ha, "enter\n"); 2855 2856 ifmr->ifm_status = IFM_AVALID; 2857 ifmr->ifm_active = IFM_ETHER; 2858 2859 if (ha->link_up) { 2860 ifmr->ifm_status |= IFM_ACTIVE; 2861 ifmr->ifm_active |= 2862 (IFM_FDX | qlnx_get_optics(ha, &ha->if_link)); 2863 2864 if (ha->if_link.link_partner_caps & 2865 (QLNX_LINK_CAP_Pause | QLNX_LINK_CAP_Asym_Pause)) 2866 ifmr->ifm_active |= 2867 (IFM_ETH_RXPAUSE | IFM_ETH_TXPAUSE); 2868 } 2869 2870 QL_DPRINT2(ha, "exit (%s)\n", (ha->link_up ? 
"link_up" : "link_down")); 2871 2872 return; 2873 } 2874 2875 static void 2876 qlnx_free_tx_pkt(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2877 struct qlnx_tx_queue *txq) 2878 { 2879 u16 idx; 2880 struct mbuf *mp; 2881 bus_dmamap_t map; 2882 int i; 2883 // struct eth_tx_bd *tx_data_bd; 2884 struct eth_tx_1st_bd *first_bd; 2885 int nbds = 0; 2886 2887 idx = txq->sw_tx_cons; 2888 mp = txq->sw_tx_ring[idx].mp; 2889 map = txq->sw_tx_ring[idx].map; 2890 2891 if ((mp == NULL) || QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL)){ 2892 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_MBUF_NULL); 2893 2894 QL_DPRINT1(ha, "(mp == NULL) " 2895 " tx_idx = 0x%x" 2896 " ecore_prod_idx = 0x%x" 2897 " ecore_cons_idx = 0x%x" 2898 " hw_bd_cons = 0x%x" 2899 " txq_db_last = 0x%x" 2900 " elem_left = 0x%x\n", 2901 fp->rss_id, 2902 ecore_chain_get_prod_idx(&txq->tx_pbl), 2903 ecore_chain_get_cons_idx(&txq->tx_pbl), 2904 le16toh(*txq->hw_cons_ptr), 2905 txq->tx_db.raw, 2906 ecore_chain_get_elem_left(&txq->tx_pbl)); 2907 2908 fp->err_tx_free_pkt_null++; 2909 2910 //DEBUG 2911 qlnx_trigger_dump(ha); 2912 2913 return; 2914 } else { 2915 QLNX_INC_OPACKETS((ha->ifp)); 2916 QLNX_INC_OBYTES((ha->ifp), (mp->m_pkthdr.len)); 2917 2918 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_POSTWRITE); 2919 bus_dmamap_unload(ha->tx_tag, map); 2920 2921 fp->tx_pkts_freed++; 2922 fp->tx_pkts_completed++; 2923 2924 m_freem(mp); 2925 } 2926 2927 first_bd = (struct eth_tx_1st_bd *)ecore_chain_consume(&txq->tx_pbl); 2928 nbds = first_bd->data.nbds; 2929 2930 // BD_SET_UNMAP_ADDR_LEN(first_bd, 0, 0); 2931 2932 for (i = 1; i < nbds; i++) { 2933 /* tx_data_bd = */ ecore_chain_consume(&txq->tx_pbl); 2934 // BD_SET_UNMAP_ADDR_LEN(tx_data_bd, 0, 0); 2935 } 2936 txq->sw_tx_ring[idx].flags = 0; 2937 txq->sw_tx_ring[idx].mp = NULL; 2938 txq->sw_tx_ring[idx].map = (bus_dmamap_t)0; 2939 2940 return; 2941 } 2942 2943 static void 2944 qlnx_tx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, 2945 struct qlnx_tx_queue *txq) 2946 { 2947 u16 hw_bd_cons; 2948 u16 ecore_cons_idx; 2949 uint16_t diff; 2950 uint16_t idx, idx2; 2951 2952 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 2953 2954 while (hw_bd_cons != 2955 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 2956 if (hw_bd_cons < ecore_cons_idx) { 2957 diff = (1 << 16) - (ecore_cons_idx - hw_bd_cons); 2958 } else { 2959 diff = hw_bd_cons - ecore_cons_idx; 2960 } 2961 if ((diff > TX_RING_SIZE) || 2962 QL_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF)){ 2963 QL_RESET_ERR_INJECT(ha, QL_ERR_INJCT_TX_INT_DIFF); 2964 2965 QL_DPRINT1(ha, "(diff = 0x%x) " 2966 " tx_idx = 0x%x" 2967 " ecore_prod_idx = 0x%x" 2968 " ecore_cons_idx = 0x%x" 2969 " hw_bd_cons = 0x%x" 2970 " txq_db_last = 0x%x" 2971 " elem_left = 0x%x\n", 2972 diff, 2973 fp->rss_id, 2974 ecore_chain_get_prod_idx(&txq->tx_pbl), 2975 ecore_chain_get_cons_idx(&txq->tx_pbl), 2976 le16toh(*txq->hw_cons_ptr), 2977 txq->tx_db.raw, 2978 ecore_chain_get_elem_left(&txq->tx_pbl)); 2979 2980 fp->err_tx_cons_idx_conflict++; 2981 2982 //DEBUG 2983 qlnx_trigger_dump(ha); 2984 } 2985 2986 idx = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2987 idx2 = (txq->sw_tx_cons + 2) & (TX_RING_SIZE - 1); 2988 prefetch(txq->sw_tx_ring[idx].mp); 2989 prefetch(txq->sw_tx_ring[idx2].mp); 2990 2991 qlnx_free_tx_pkt(ha, fp, txq); 2992 2993 txq->sw_tx_cons = (txq->sw_tx_cons + 1) & (TX_RING_SIZE - 1); 2994 } 2995 return; 2996 } 2997 2998 static int 2999 qlnx_transmit_locked(if_t ifp, struct qlnx_fastpath *fp, struct mbuf *mp) 3000 { 3001 int ret = 0; 3002 struct qlnx_tx_queue *txq; 3003 
qlnx_host_t * ha; 3004 uint16_t elem_left; 3005 3006 txq = fp->txq[0]; 3007 ha = (qlnx_host_t *)fp->edev; 3008 3009 if ((!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) || (!ha->link_up)) { 3010 if(mp != NULL) 3011 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3012 return (ret); 3013 } 3014 3015 if(mp != NULL) 3016 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3017 3018 mp = drbr_peek(ifp, fp->tx_br); 3019 3020 while (mp != NULL) { 3021 if (qlnx_send(ha, fp, &mp)) { 3022 if (mp != NULL) { 3023 drbr_putback(ifp, fp->tx_br, mp); 3024 } else { 3025 fp->tx_pkts_processed++; 3026 drbr_advance(ifp, fp->tx_br); 3027 } 3028 goto qlnx_transmit_locked_exit; 3029 3030 } else { 3031 drbr_advance(ifp, fp->tx_br); 3032 fp->tx_pkts_transmitted++; 3033 fp->tx_pkts_processed++; 3034 } 3035 3036 mp = drbr_peek(ifp, fp->tx_br); 3037 } 3038 3039 qlnx_transmit_locked_exit: 3040 if((qlnx_num_tx_compl(ha,fp, fp->txq[0]) > QLNX_TX_COMPL_THRESH) || 3041 ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) 3042 < QLNX_TX_ELEM_MAX_THRESH)) 3043 (void)qlnx_tx_int(ha, fp, fp->txq[0]); 3044 3045 QL_DPRINT2(ha, "%s: exit ret = %d\n", __func__, ret); 3046 return ret; 3047 } 3048 3049 static int 3050 qlnx_transmit(if_t ifp, struct mbuf *mp) 3051 { 3052 qlnx_host_t *ha = (qlnx_host_t *)if_getsoftc(ifp); 3053 struct qlnx_fastpath *fp; 3054 int rss_id = 0, ret = 0; 3055 3056 #ifdef QLNX_TRACEPERF_DATA 3057 uint64_t tx_pkts = 0, tx_compl = 0; 3058 #endif 3059 3060 QL_DPRINT2(ha, "enter\n"); 3061 3062 if (M_HASHTYPE_GET(mp) != M_HASHTYPE_NONE) 3063 rss_id = (mp->m_pkthdr.flowid % ECORE_RSS_IND_TABLE_SIZE) % 3064 ha->num_rss; 3065 3066 fp = &ha->fp_array[rss_id]; 3067 3068 if (fp->tx_br == NULL) { 3069 ret = EINVAL; 3070 goto qlnx_transmit_exit; 3071 } 3072 3073 if (mtx_trylock(&fp->tx_mtx)) { 3074 #ifdef QLNX_TRACEPERF_DATA 3075 tx_pkts = fp->tx_pkts_transmitted; 3076 tx_compl = fp->tx_pkts_completed; 3077 #endif 3078 3079 ret = qlnx_transmit_locked(ifp, fp, mp); 3080 3081 #ifdef QLNX_TRACEPERF_DATA 3082 fp->tx_pkts_trans_ctx += (fp->tx_pkts_transmitted - tx_pkts); 3083 fp->tx_pkts_compl_ctx += (fp->tx_pkts_completed - tx_compl); 3084 #endif 3085 mtx_unlock(&fp->tx_mtx); 3086 } else { 3087 if (mp != NULL && (fp->fp_taskqueue != NULL)) { 3088 ret = drbr_enqueue(ifp, fp->tx_br, mp); 3089 taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task); 3090 } 3091 } 3092 3093 qlnx_transmit_exit: 3094 3095 QL_DPRINT2(ha, "exit ret = %d\n", ret); 3096 return ret; 3097 } 3098 3099 static void 3100 qlnx_qflush(if_t ifp) 3101 { 3102 int rss_id; 3103 struct qlnx_fastpath *fp; 3104 struct mbuf *mp; 3105 qlnx_host_t *ha; 3106 3107 ha = (qlnx_host_t *)if_getsoftc(ifp); 3108 3109 QL_DPRINT2(ha, "enter\n"); 3110 3111 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 3112 fp = &ha->fp_array[rss_id]; 3113 3114 if (fp == NULL) 3115 continue; 3116 3117 if (fp->tx_br) { 3118 mtx_lock(&fp->tx_mtx); 3119 3120 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 3121 fp->tx_pkts_freed++; 3122 m_freem(mp); 3123 } 3124 mtx_unlock(&fp->tx_mtx); 3125 } 3126 } 3127 QL_DPRINT2(ha, "exit\n"); 3128 3129 return; 3130 } 3131 3132 static void 3133 qlnx_txq_doorbell_wr32(qlnx_host_t *ha, void *reg_addr, uint32_t value) 3134 { 3135 uint32_t offset; 3136 3137 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)ha->pci_dbells); 3138 3139 bus_write_4(ha->pci_dbells, offset, value); 3140 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_READ); 3141 bus_barrier(ha->pci_dbells, 0, 0, BUS_SPACE_BARRIER_READ); 3142 3143 return; 3144 } 3145 3146 static uint32_t 3147 
qlnx_tcp_offset(qlnx_host_t *ha, struct mbuf *mp) 3148 { 3149 struct ether_vlan_header *eh = NULL; 3150 struct ip *ip = NULL; 3151 struct ip6_hdr *ip6 = NULL; 3152 struct tcphdr *th = NULL; 3153 uint32_t ehdrlen = 0, ip_hlen = 0, offset = 0; 3154 uint16_t etype = 0; 3155 uint8_t buf[sizeof(struct ip6_hdr)]; 3156 3157 eh = mtod(mp, struct ether_vlan_header *); 3158 3159 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 3160 ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 3161 etype = ntohs(eh->evl_proto); 3162 } else { 3163 ehdrlen = ETHER_HDR_LEN; 3164 etype = ntohs(eh->evl_encap_proto); 3165 } 3166 3167 switch (etype) { 3168 case ETHERTYPE_IP: 3169 ip = (struct ip *)(mp->m_data + ehdrlen); 3170 3171 ip_hlen = sizeof (struct ip); 3172 3173 if (mp->m_len < (ehdrlen + ip_hlen)) { 3174 m_copydata(mp, ehdrlen, sizeof(struct ip), buf); 3175 ip = (struct ip *)buf; 3176 } 3177 3178 th = (struct tcphdr *)(ip + 1); 3179 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3180 break; 3181 3182 case ETHERTYPE_IPV6: 3183 ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); 3184 3185 ip_hlen = sizeof(struct ip6_hdr); 3186 3187 if (mp->m_len < (ehdrlen + ip_hlen)) { 3188 m_copydata(mp, ehdrlen, sizeof (struct ip6_hdr), 3189 buf); 3190 ip6 = (struct ip6_hdr *)buf; 3191 } 3192 th = (struct tcphdr *)(ip6 + 1); 3193 offset = ip_hlen + ehdrlen + (th->th_off << 2); 3194 break; 3195 3196 default: 3197 break; 3198 } 3199 3200 return (offset); 3201 } 3202 3203 static __inline int 3204 qlnx_tso_check(struct qlnx_fastpath *fp, bus_dma_segment_t *segs, int nsegs, 3205 uint32_t offset) 3206 { 3207 int i; 3208 uint32_t sum, nbds_in_hdr = 1; 3209 uint32_t window; 3210 bus_dma_segment_t *s_seg; 3211 3212 /* If the header spans multiple segments, skip those segments */ 3213 3214 if (nsegs < ETH_TX_LSO_WINDOW_BDS_NUM) 3215 return (0); 3216 3217 i = 0; 3218 3219 while ((i < nsegs) && (offset >= segs->ds_len)) { 3220 offset = offset - segs->ds_len; 3221 segs++; 3222 i++; 3223 nbds_in_hdr++; 3224 } 3225 3226 window = ETH_TX_LSO_WINDOW_BDS_NUM - nbds_in_hdr; 3227 3228 nsegs = nsegs - i; 3229 3230 while (nsegs >= window) { 3231 sum = 0; 3232 s_seg = segs; 3233 3234 for (i = 0; i < window; i++){ 3235 sum += s_seg->ds_len; 3236 s_seg++; 3237 } 3238 3239 if (sum < ETH_TX_LSO_WINDOW_MIN_LEN) { 3240 fp->tx_lso_wnd_min_len++; 3241 return (-1); 3242 } 3243 3244 nsegs = nsegs - 1; 3245 segs++; 3246 } 3247 3248 return (0); 3249 } 3250 3251 static int 3252 qlnx_send(qlnx_host_t *ha, struct qlnx_fastpath *fp, struct mbuf **m_headp) 3253 { 3254 bus_dma_segment_t *segs; 3255 bus_dmamap_t map = 0; 3256 uint32_t nsegs = 0; 3257 int ret = -1; 3258 struct mbuf *m_head = *m_headp; 3259 uint16_t idx = 0; 3260 uint16_t elem_left; 3261 3262 uint8_t nbd = 0; 3263 struct qlnx_tx_queue *txq; 3264 3265 struct eth_tx_1st_bd *first_bd; 3266 struct eth_tx_2nd_bd *second_bd; 3267 struct eth_tx_3rd_bd *third_bd; 3268 struct eth_tx_bd *tx_data_bd; 3269 3270 int seg_idx = 0; 3271 uint32_t nbds_in_hdr = 0; 3272 uint32_t offset = 0; 3273 3274 #ifdef QLNX_TRACE_PERF_DATA 3275 uint16_t bd_used; 3276 #endif 3277 3278 QL_DPRINT8(ha, "enter[%d]\n", fp->rss_id); 3279 3280 if (!ha->link_up) 3281 return (-1); 3282 3283 first_bd = NULL; 3284 second_bd = NULL; 3285 third_bd = NULL; 3286 tx_data_bd = NULL; 3287 3288 txq = fp->txq[0]; 3289 3290 if ((int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl)) < 3291 QLNX_TX_ELEM_MIN_THRESH) { 3292 fp->tx_nsegs_gt_elem_left++; 3293 fp->err_tx_nsegs_gt_elem_left++; 3294 3295 return (ENOBUFS); 3296 } 3297 3298 idx = txq->sw_tx_prod; 
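	/*
	 * Map the mbuf chain for DMA. If the mapping fails with EFBIG, or
	 * produces more segments than the non-TSO limit allows (or than the
	 * LSO window check in qlnx_tso_check() permits for TSO frames), the
	 * chain is collapsed with m_defrag() and mapped again before any
	 * buffer descriptors are built.
	 */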
3299 3300 map = txq->sw_tx_ring[idx].map; 3301 segs = txq->segs; 3302 3303 ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, segs, &nsegs, 3304 BUS_DMA_NOWAIT); 3305 3306 if (ha->dbg_trace_tso_pkt_len) { 3307 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3308 if (!fp->tx_tso_min_pkt_len) { 3309 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3310 fp->tx_tso_min_pkt_len = m_head->m_pkthdr.len; 3311 } else { 3312 if (fp->tx_tso_min_pkt_len > m_head->m_pkthdr.len) 3313 fp->tx_tso_min_pkt_len = 3314 m_head->m_pkthdr.len; 3315 if (fp->tx_tso_max_pkt_len < m_head->m_pkthdr.len) 3316 fp->tx_tso_max_pkt_len = 3317 m_head->m_pkthdr.len; 3318 } 3319 } 3320 } 3321 3322 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3323 offset = qlnx_tcp_offset(ha, m_head); 3324 3325 if ((ret == EFBIG) || 3326 ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && ( 3327 (!(m_head->m_pkthdr.csum_flags & CSUM_TSO)) || 3328 ((m_head->m_pkthdr.csum_flags & CSUM_TSO) && 3329 qlnx_tso_check(fp, segs, nsegs, offset))))) { 3330 struct mbuf *m; 3331 3332 QL_DPRINT8(ha, "EFBIG [%d]\n", m_head->m_pkthdr.len); 3333 3334 fp->tx_defrag++; 3335 3336 m = m_defrag(m_head, M_NOWAIT); 3337 if (m == NULL) { 3338 fp->err_tx_defrag++; 3339 fp->tx_pkts_freed++; 3340 m_freem(m_head); 3341 *m_headp = NULL; 3342 QL_DPRINT1(ha, "m_defrag() = NULL [%d]\n", ret); 3343 return (ENOBUFS); 3344 } 3345 3346 m_head = m; 3347 *m_headp = m_head; 3348 3349 if ((ret = bus_dmamap_load_mbuf_sg(ha->tx_tag, map, m_head, 3350 segs, &nsegs, BUS_DMA_NOWAIT))) { 3351 fp->err_tx_defrag_dmamap_load++; 3352 3353 QL_DPRINT1(ha, 3354 "bus_dmamap_load_mbuf_sg failed0 [%d, %d]\n", 3355 ret, m_head->m_pkthdr.len); 3356 3357 fp->tx_pkts_freed++; 3358 m_freem(m_head); 3359 *m_headp = NULL; 3360 3361 return (ret); 3362 } 3363 3364 if ((nsegs > QLNX_MAX_SEGMENTS_NON_TSO) && 3365 !(m_head->m_pkthdr.csum_flags & CSUM_TSO)) { 3366 fp->err_tx_non_tso_max_seg++; 3367 3368 QL_DPRINT1(ha, 3369 "(%d) nsegs too many for non-TSO [%d, %d]\n", 3370 ret, nsegs, m_head->m_pkthdr.len); 3371 3372 fp->tx_pkts_freed++; 3373 m_freem(m_head); 3374 *m_headp = NULL; 3375 3376 return (ret); 3377 } 3378 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) 3379 offset = qlnx_tcp_offset(ha, m_head); 3380 3381 } else if (ret) { 3382 fp->err_tx_dmamap_load++; 3383 3384 QL_DPRINT1(ha, "bus_dmamap_load_mbuf_sg failed1 [%d, %d]\n", 3385 ret, m_head->m_pkthdr.len); 3386 fp->tx_pkts_freed++; 3387 m_freem(m_head); 3388 *m_headp = NULL; 3389 return (ret); 3390 } 3391 3392 QL_ASSERT(ha, (nsegs != 0), ("qlnx_send: empty packet")); 3393 3394 if (ha->dbg_trace_tso_pkt_len) { 3395 if (nsegs < QLNX_FP_MAX_SEGS) 3396 fp->tx_pkts[(nsegs - 1)]++; 3397 else 3398 fp->tx_pkts[(QLNX_FP_MAX_SEGS - 1)]++; 3399 } 3400 3401 #ifdef QLNX_TRACE_PERF_DATA 3402 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3403 if(m_head->m_pkthdr.len <= 2048) 3404 fp->tx_pkts_hist[0]++; 3405 else if((m_head->m_pkthdr.len > 2048) && 3406 (m_head->m_pkthdr.len <= 4096)) 3407 fp->tx_pkts_hist[1]++; 3408 else if((m_head->m_pkthdr.len > 4096) && 3409 (m_head->m_pkthdr.len <= 8192)) 3410 fp->tx_pkts_hist[2]++; 3411 else if((m_head->m_pkthdr.len > 8192) && 3412 (m_head->m_pkthdr.len <= 12288 )) 3413 fp->tx_pkts_hist[3]++; 3414 else if((m_head->m_pkthdr.len > 11288) && 3415 (m_head->m_pkthdr.len <= 16394)) 3416 fp->tx_pkts_hist[4]++; 3417 else if((m_head->m_pkthdr.len > 16384) && 3418 (m_head->m_pkthdr.len <= 20480)) 3419 fp->tx_pkts_hist[5]++; 3420 else if((m_head->m_pkthdr.len > 20480) && 3421 (m_head->m_pkthdr.len <= 24576)) 3422 fp->tx_pkts_hist[6]++; 3423 else 
if((m_head->m_pkthdr.len > 24576) && 3424 (m_head->m_pkthdr.len <= 28672)) 3425 fp->tx_pkts_hist[7]++; 3426 else if((m_head->m_pkthdr.len > 28762) && 3427 (m_head->m_pkthdr.len <= 32768)) 3428 fp->tx_pkts_hist[8]++; 3429 else if((m_head->m_pkthdr.len > 32768) && 3430 (m_head->m_pkthdr.len <= 36864)) 3431 fp->tx_pkts_hist[9]++; 3432 else if((m_head->m_pkthdr.len > 36864) && 3433 (m_head->m_pkthdr.len <= 40960)) 3434 fp->tx_pkts_hist[10]++; 3435 else if((m_head->m_pkthdr.len > 40960) && 3436 (m_head->m_pkthdr.len <= 45056)) 3437 fp->tx_pkts_hist[11]++; 3438 else if((m_head->m_pkthdr.len > 45056) && 3439 (m_head->m_pkthdr.len <= 49152)) 3440 fp->tx_pkts_hist[12]++; 3441 else if((m_head->m_pkthdr.len > 49512) && 3442 m_head->m_pkthdr.len <= 53248)) 3443 fp->tx_pkts_hist[13]++; 3444 else if((m_head->m_pkthdr.len > 53248) && 3445 (m_head->m_pkthdr.len <= 57344)) 3446 fp->tx_pkts_hist[14]++; 3447 else if((m_head->m_pkthdr.len > 53248) && 3448 (m_head->m_pkthdr.len <= 57344)) 3449 fp->tx_pkts_hist[15]++; 3450 else if((m_head->m_pkthdr.len > 57344) && 3451 (m_head->m_pkthdr.len <= 61440)) 3452 fp->tx_pkts_hist[16]++; 3453 else 3454 fp->tx_pkts_hist[17]++; 3455 } 3456 3457 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3458 elem_left = ecore_chain_get_elem_left(&txq->tx_pbl); 3459 bd_used = TX_RING_SIZE - elem_left; 3460 3461 if(bd_used <= 100) 3462 fp->tx_pkts_q[0]++; 3463 else if((bd_used > 100) && (bd_used <= 500)) 3464 fp->tx_pkts_q[1]++; 3465 else if((bd_used > 500) && (bd_used <= 1000)) 3466 fp->tx_pkts_q[2]++; 3467 else if((bd_used > 1000) && (bd_used <= 2000)) 3468 fp->tx_pkts_q[3]++; 3469 else if((bd_used > 3000) && (bd_used <= 4000)) 3470 fp->tx_pkts_q[4]++; 3471 else if((bd_used > 4000) && (bd_used <= 5000)) 3472 fp->tx_pkts_q[5]++; 3473 else if((bd_used > 6000) && (bd_used <= 7000)) 3474 fp->tx_pkts_q[6]++; 3475 else if((bd_used > 7000) && (bd_used <= 8000)) 3476 fp->tx_pkts_q[7]++; 3477 else if((bd_used > 8000) && (bd_used <= 9000)) 3478 fp->tx_pkts_q[8]++; 3479 else if((bd_used > 9000) && (bd_used <= 10000)) 3480 fp->tx_pkts_q[9]++; 3481 else if((bd_used > 10000) && (bd_used <= 11000)) 3482 fp->tx_pkts_q[10]++; 3483 else if((bd_used > 11000) && (bd_used <= 12000)) 3484 fp->tx_pkts_q[11]++; 3485 else if((bd_used > 12000) && (bd_used <= 13000)) 3486 fp->tx_pkts_q[12]++; 3487 else if((bd_used > 13000) && (bd_used <= 14000)) 3488 fp->tx_pkts_q[13]++; 3489 else if((bd_used > 14000) && (bd_used <= 15000)) 3490 fp->tx_pkts_q[14]++; 3491 else if((bd_used > 15000) && (bd_used <= 16000)) 3492 fp->tx_pkts_q[15]++; 3493 else 3494 fp->tx_pkts_q[16]++; 3495 } 3496 3497 #endif /* end of QLNX_TRACE_PERF_DATA */ 3498 3499 if ((nsegs + QLNX_TX_ELEM_RESERVE) > 3500 (int)(elem_left = ecore_chain_get_elem_left(&txq->tx_pbl))) { 3501 QL_DPRINT1(ha, "(%d, 0x%x) insuffient BDs" 3502 " in chain[%d] trying to free packets\n", 3503 nsegs, elem_left, fp->rss_id); 3504 3505 fp->tx_nsegs_gt_elem_left++; 3506 3507 (void)qlnx_tx_int(ha, fp, txq); 3508 3509 if ((nsegs + QLNX_TX_ELEM_RESERVE) > (int)(elem_left = 3510 ecore_chain_get_elem_left(&txq->tx_pbl))) { 3511 QL_DPRINT1(ha, 3512 "(%d, 0x%x) insuffient BDs in chain[%d]\n", 3513 nsegs, elem_left, fp->rss_id); 3514 3515 fp->err_tx_nsegs_gt_elem_left++; 3516 fp->tx_ring_full = 1; 3517 if (ha->storm_stats_enable) 3518 ha->storm_stats_gather = 1; 3519 return (ENOBUFS); 3520 } 3521 } 3522 3523 bus_dmamap_sync(ha->tx_tag, map, BUS_DMASYNC_PREWRITE); 3524 3525 txq->sw_tx_ring[idx].mp = m_head; 3526 3527 first_bd = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl); 
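	/*
	 * Build the transmit buffer descriptor chain. The first BD carries
	 * the offload flags (IP/L4 checksum, VLAN insertion, LSO); for TSO
	 * packets the second and third BDs describe the header split and
	 * MSS, and every remaining DMA segment gets its own data BD. The
	 * final BD count is written back into first_bd->data.nbds before
	 * the doorbell is rung.
	 */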
3528 3529 memset(first_bd, 0, sizeof(*first_bd)); 3530 3531 first_bd->data.bd_flags.bitfields = 3532 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; 3533 3534 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, segs->ds_len); 3535 3536 nbd++; 3537 3538 if (m_head->m_pkthdr.csum_flags & CSUM_IP) { 3539 first_bd->data.bd_flags.bitfields |= 3540 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3541 } 3542 3543 if (m_head->m_pkthdr.csum_flags & 3544 (CSUM_UDP | CSUM_TCP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)) { 3545 first_bd->data.bd_flags.bitfields |= 3546 (1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT); 3547 } 3548 3549 if (m_head->m_flags & M_VLANTAG) { 3550 first_bd->data.vlan = m_head->m_pkthdr.ether_vtag; 3551 first_bd->data.bd_flags.bitfields |= 3552 (1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT); 3553 } 3554 3555 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3556 first_bd->data.bd_flags.bitfields |= 3557 (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT); 3558 first_bd->data.bd_flags.bitfields |= 3559 (1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT); 3560 3561 nbds_in_hdr = 1; 3562 3563 if (offset == segs->ds_len) { 3564 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3565 segs++; 3566 seg_idx++; 3567 3568 second_bd = (struct eth_tx_2nd_bd *) 3569 ecore_chain_produce(&txq->tx_pbl); 3570 memset(second_bd, 0, sizeof(*second_bd)); 3571 nbd++; 3572 3573 if (seg_idx < nsegs) { 3574 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3575 (segs->ds_addr), (segs->ds_len)); 3576 segs++; 3577 seg_idx++; 3578 } 3579 3580 third_bd = (struct eth_tx_3rd_bd *) 3581 ecore_chain_produce(&txq->tx_pbl); 3582 memset(third_bd, 0, sizeof(*third_bd)); 3583 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3584 third_bd->data.bitfields |= 3585 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3586 nbd++; 3587 3588 if (seg_idx < nsegs) { 3589 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3590 (segs->ds_addr), (segs->ds_len)); 3591 segs++; 3592 seg_idx++; 3593 } 3594 3595 for (; seg_idx < nsegs; seg_idx++) { 3596 tx_data_bd = (struct eth_tx_bd *) 3597 ecore_chain_produce(&txq->tx_pbl); 3598 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3599 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3600 segs->ds_addr,\ 3601 segs->ds_len); 3602 segs++; 3603 nbd++; 3604 } 3605 3606 } else if (offset < segs->ds_len) { 3607 BD_SET_UNMAP_ADDR_LEN(first_bd, segs->ds_addr, offset); 3608 3609 second_bd = (struct eth_tx_2nd_bd *) 3610 ecore_chain_produce(&txq->tx_pbl); 3611 memset(second_bd, 0, sizeof(*second_bd)); 3612 BD_SET_UNMAP_ADDR_LEN(second_bd, \ 3613 (segs->ds_addr + offset),\ 3614 (segs->ds_len - offset)); 3615 nbd++; 3616 segs++; 3617 3618 third_bd = (struct eth_tx_3rd_bd *) 3619 ecore_chain_produce(&txq->tx_pbl); 3620 memset(third_bd, 0, sizeof(*third_bd)); 3621 3622 BD_SET_UNMAP_ADDR_LEN(third_bd, \ 3623 segs->ds_addr,\ 3624 segs->ds_len); 3625 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3626 third_bd->data.bitfields |= 3627 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3628 segs++; 3629 nbd++; 3630 3631 for (seg_idx = 2; seg_idx < nsegs; seg_idx++) { 3632 tx_data_bd = (struct eth_tx_bd *) 3633 ecore_chain_produce(&txq->tx_pbl); 3634 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3635 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, \ 3636 segs->ds_addr,\ 3637 segs->ds_len); 3638 segs++; 3639 nbd++; 3640 } 3641 3642 } else { 3643 offset = offset - segs->ds_len; 3644 segs++; 3645 3646 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3647 if (offset) 3648 nbds_in_hdr++; 3649 3650 tx_data_bd = (struct eth_tx_bd *) 3651 ecore_chain_produce(&txq->tx_pbl); 3652 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3653 3654 
if (second_bd == NULL) { 3655 second_bd = (struct eth_tx_2nd_bd *) 3656 tx_data_bd; 3657 } else if (third_bd == NULL) { 3658 third_bd = (struct eth_tx_3rd_bd *) 3659 tx_data_bd; 3660 } 3661 3662 if (offset && (offset < segs->ds_len)) { 3663 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3664 segs->ds_addr, offset); 3665 3666 tx_data_bd = (struct eth_tx_bd *) 3667 ecore_chain_produce(&txq->tx_pbl); 3668 3669 memset(tx_data_bd, 0, 3670 sizeof(*tx_data_bd)); 3671 3672 if (second_bd == NULL) { 3673 second_bd = 3674 (struct eth_tx_2nd_bd *)tx_data_bd; 3675 } else if (third_bd == NULL) { 3676 third_bd = 3677 (struct eth_tx_3rd_bd *)tx_data_bd; 3678 } 3679 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3680 (segs->ds_addr + offset), \ 3681 (segs->ds_len - offset)); 3682 nbd++; 3683 offset = 0; 3684 } else { 3685 if (offset) 3686 offset = offset - segs->ds_len; 3687 BD_SET_UNMAP_ADDR_LEN(tx_data_bd,\ 3688 segs->ds_addr, segs->ds_len); 3689 } 3690 segs++; 3691 nbd++; 3692 } 3693 3694 if (third_bd == NULL) { 3695 third_bd = (struct eth_tx_3rd_bd *) 3696 ecore_chain_produce(&txq->tx_pbl); 3697 memset(third_bd, 0, sizeof(*third_bd)); 3698 } 3699 3700 third_bd->data.lso_mss = m_head->m_pkthdr.tso_segsz; 3701 third_bd->data.bitfields |= 3702 (nbds_in_hdr<<ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT); 3703 } 3704 fp->tx_tso_pkts++; 3705 } else { 3706 segs++; 3707 for (seg_idx = 1; seg_idx < nsegs; seg_idx++) { 3708 tx_data_bd = (struct eth_tx_bd *) 3709 ecore_chain_produce(&txq->tx_pbl); 3710 memset(tx_data_bd, 0, sizeof(*tx_data_bd)); 3711 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, segs->ds_addr,\ 3712 segs->ds_len); 3713 segs++; 3714 nbd++; 3715 } 3716 first_bd->data.bitfields = 3717 (m_head->m_pkthdr.len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) 3718 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT; 3719 first_bd->data.bitfields = 3720 htole16(first_bd->data.bitfields); 3721 fp->tx_non_tso_pkts++; 3722 } 3723 3724 first_bd->data.nbds = nbd; 3725 3726 if (ha->dbg_trace_tso_pkt_len) { 3727 if (fp->tx_tso_max_nsegs < nsegs) 3728 fp->tx_tso_max_nsegs = nsegs; 3729 3730 if ((nsegs < fp->tx_tso_min_nsegs) || (!fp->tx_tso_min_nsegs)) 3731 fp->tx_tso_min_nsegs = nsegs; 3732 } 3733 3734 txq->sw_tx_ring[idx].nsegs = nsegs; 3735 txq->sw_tx_prod = (txq->sw_tx_prod + 1) & (TX_RING_SIZE - 1); 3736 3737 txq->tx_db.data.bd_prod = 3738 htole16(ecore_chain_get_prod_idx(&txq->tx_pbl)); 3739 3740 qlnx_txq_doorbell_wr32(ha, txq->doorbell_addr, txq->tx_db.raw); 3741 3742 QL_DPRINT8(ha, "exit[%d]\n", fp->rss_id); 3743 return (0); 3744 } 3745 3746 static void 3747 qlnx_stop(qlnx_host_t *ha) 3748 { 3749 if_t ifp = ha->ifp; 3750 int i; 3751 3752 if_setdrvflagbits(ifp, 0, (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)); 3753 3754 /* 3755 * We simply lock and unlock each fp->tx_mtx to 3756 * propagate the if_drv_flags 3757 * state to each tx thread 3758 */ 3759 QL_DPRINT1(ha, "QLNX STATE = %d\n",ha->state); 3760 3761 if (ha->state == QLNX_STATE_OPEN) { 3762 for (i = 0; i < ha->num_rss; i++) { 3763 struct qlnx_fastpath *fp = &ha->fp_array[i]; 3764 3765 mtx_lock(&fp->tx_mtx); 3766 mtx_unlock(&fp->tx_mtx); 3767 3768 if (fp->fp_taskqueue != NULL) 3769 taskqueue_enqueue(fp->fp_taskqueue, 3770 &fp->fp_task); 3771 } 3772 } 3773 #ifdef QLNX_ENABLE_IWARP 3774 if (qlnx_vf_device(ha) != 0) { 3775 qlnx_rdma_dev_close(ha); 3776 } 3777 #endif /* #ifdef QLNX_ENABLE_IWARP */ 3778 3779 qlnx_unload(ha); 3780 3781 return; 3782 } 3783 3784 static int 3785 qlnx_get_ifq_snd_maxlen(qlnx_host_t *ha) 3786 { 3787 return(TX_RING_SIZE - 1); 3788 } 3789 3790 uint8_t * 3791 qlnx_get_mac_addr(qlnx_host_t *ha) 3792 { 3793 struct 
ecore_hwfn *p_hwfn; 3794 unsigned char mac[ETHER_ADDR_LEN]; 3795 uint8_t p_is_forced; 3796 3797 p_hwfn = &ha->cdev.hwfns[0]; 3798 3799 if (qlnx_vf_device(ha) != 0) 3800 return (p_hwfn->hw_info.hw_mac_addr); 3801 3802 ecore_vf_read_bulletin(p_hwfn, &p_is_forced); 3803 if (ecore_vf_bulletin_get_forced_mac(p_hwfn, mac, &p_is_forced) == 3804 true) { 3805 device_printf(ha->pci_dev, "%s: p_is_forced = %d" 3806 " mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", __func__, 3807 p_is_forced, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3808 memcpy(ha->primary_mac, mac, ETH_ALEN); 3809 } 3810 3811 return (ha->primary_mac); 3812 } 3813 3814 static uint32_t 3815 qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link) 3816 { 3817 uint32_t ifm_type = 0; 3818 3819 switch (if_link->media_type) { 3820 case MEDIA_MODULE_FIBER: 3821 case MEDIA_UNSPECIFIED: 3822 if (if_link->speed == (100 * 1000)) 3823 ifm_type = QLNX_IFM_100G_SR4; 3824 else if (if_link->speed == (40 * 1000)) 3825 ifm_type = IFM_40G_SR4; 3826 else if (if_link->speed == (25 * 1000)) 3827 ifm_type = QLNX_IFM_25G_SR; 3828 else if (if_link->speed == (10 * 1000)) 3829 ifm_type = (IFM_10G_LR | IFM_10G_SR); 3830 else if (if_link->speed == (1 * 1000)) 3831 ifm_type = (IFM_1000_SX | IFM_1000_LX); 3832 3833 break; 3834 3835 case MEDIA_DA_TWINAX: 3836 if (if_link->speed == (100 * 1000)) 3837 ifm_type = QLNX_IFM_100G_CR4; 3838 else if (if_link->speed == (40 * 1000)) 3839 ifm_type = IFM_40G_CR4; 3840 else if (if_link->speed == (25 * 1000)) 3841 ifm_type = QLNX_IFM_25G_CR; 3842 else if (if_link->speed == (10 * 1000)) 3843 ifm_type = IFM_10G_TWINAX; 3844 3845 break; 3846 3847 default : 3848 ifm_type = IFM_UNKNOWN; 3849 break; 3850 } 3851 return (ifm_type); 3852 } 3853 3854 /***************************************************************************** 3855 * Interrupt Service Functions 3856 *****************************************************************************/ 3857 3858 static int 3859 qlnx_rx_jumbo_chain(qlnx_host_t *ha, struct qlnx_fastpath *fp, 3860 struct mbuf *mp_head, uint16_t len) 3861 { 3862 struct mbuf *mp, *mpf, *mpl; 3863 struct sw_rx_data *sw_rx_data; 3864 struct qlnx_rx_queue *rxq; 3865 uint16_t len_in_buffer; 3866 3867 rxq = fp->rxq; 3868 mpf = mpl = mp = NULL; 3869 3870 while (len) { 3871 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3872 3873 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3874 mp = sw_rx_data->data; 3875 3876 if (mp == NULL) { 3877 QL_DPRINT1(ha, "mp = NULL\n"); 3878 fp->err_rx_mp_null++; 3879 rxq->sw_rx_cons = 3880 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3881 3882 if (mpf != NULL) 3883 m_freem(mpf); 3884 3885 return (-1); 3886 } 3887 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 3888 BUS_DMASYNC_POSTREAD); 3889 3890 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 3891 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 3892 " incoming packet and reusing its buffer\n"); 3893 3894 qlnx_reuse_rx_data(rxq); 3895 fp->err_rx_alloc_errors++; 3896 3897 if (mpf != NULL) 3898 m_freem(mpf); 3899 3900 return (-1); 3901 } 3902 ecore_chain_consume(&rxq->rx_bd_ring); 3903 3904 if (len > rxq->rx_buf_size) 3905 len_in_buffer = rxq->rx_buf_size; 3906 else 3907 len_in_buffer = len; 3908 3909 len = len - len_in_buffer; 3910 3911 mp->m_flags &= ~M_PKTHDR; 3912 mp->m_next = NULL; 3913 mp->m_len = len_in_buffer; 3914 3915 if (mpf == NULL) 3916 mpf = mpl = mp; 3917 else { 3918 mpl->m_next = mp; 3919 mpl = mp; 3920 } 3921 } 3922 3923 if (mpf != NULL) 3924 mp_head->m_next = mpf; 3925 3926 return (0); 3927 } 3928 
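/*
 * Hardware LRO (TPA) aggregation handling: qlnx_tpa_start() claims the
 * aggregation slot named by the start CQE and builds the head of the mbuf
 * chain, qlnx_tpa_cont() appends the buffers listed in each continuation
 * CQE, and qlnx_tpa_end() consumes the final length list and completes the
 * aggregated frame. A buffer allocation or state error moves the slot to
 * QLNX_AGG_STATE_ERROR so the remaining CQEs for that aggregation are
 * consumed and the packet is dropped.
 */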
3929 static void 3930 qlnx_tpa_start(qlnx_host_t *ha, 3931 struct qlnx_fastpath *fp, 3932 struct qlnx_rx_queue *rxq, 3933 struct eth_fast_path_rx_tpa_start_cqe *cqe) 3934 { 3935 uint32_t agg_index; 3936 if_t ifp = ha->ifp; 3937 struct mbuf *mp; 3938 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 3939 struct sw_rx_data *sw_rx_data; 3940 dma_addr_t addr; 3941 bus_dmamap_t map; 3942 struct eth_rx_bd *rx_bd; 3943 int i; 3944 uint8_t hash_type; 3945 3946 agg_index = cqe->tpa_agg_index; 3947 3948 QL_DPRINT7(ha, "[rss_id = %d]: enter\n \ 3949 \t type = 0x%x\n \ 3950 \t bitfields = 0x%x\n \ 3951 \t seg_len = 0x%x\n \ 3952 \t pars_flags = 0x%x\n \ 3953 \t vlan_tag = 0x%x\n \ 3954 \t rss_hash = 0x%x\n \ 3955 \t len_on_first_bd = 0x%x\n \ 3956 \t placement_offset = 0x%x\n \ 3957 \t tpa_agg_index = 0x%x\n \ 3958 \t header_len = 0x%x\n \ 3959 \t ext_bd_len_list[0] = 0x%x\n \ 3960 \t ext_bd_len_list[1] = 0x%x\n \ 3961 \t ext_bd_len_list[2] = 0x%x\n \ 3962 \t ext_bd_len_list[3] = 0x%x\n \ 3963 \t ext_bd_len_list[4] = 0x%x\n", 3964 fp->rss_id, cqe->type, cqe->bitfields, cqe->seg_len, 3965 cqe->pars_flags.flags, cqe->vlan_tag, 3966 cqe->rss_hash, cqe->len_on_first_bd, cqe->placement_offset, 3967 cqe->tpa_agg_index, cqe->header_len, 3968 cqe->ext_bd_len_list[0], cqe->ext_bd_len_list[1], 3969 cqe->ext_bd_len_list[2], cqe->ext_bd_len_list[3], 3970 cqe->ext_bd_len_list[4]); 3971 3972 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 3973 fp->err_rx_tpa_invalid_agg_num++; 3974 return; 3975 } 3976 3977 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 3978 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, BUS_DMASYNC_POSTREAD); 3979 mp = sw_rx_data->data; 3980 3981 QL_DPRINT7(ha, "[rss_id = %d]: mp = %p \n ", fp->rss_id, mp); 3982 3983 if (mp == NULL) { 3984 QL_DPRINT7(ha, "[%d]: mp = NULL\n", fp->rss_id); 3985 fp->err_rx_mp_null++; 3986 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 3987 3988 return; 3989 } 3990 3991 if ((le16toh(cqe->pars_flags.flags)) & CQE_FLAGS_ERR) { 3992 QL_DPRINT7(ha, "[%d]: CQE in CONS = %u has error," 3993 " flags = %x, dropping incoming packet\n", fp->rss_id, 3994 rxq->sw_rx_cons, le16toh(cqe->pars_flags.flags)); 3995 3996 fp->err_rx_hw_errors++; 3997 3998 qlnx_reuse_rx_data(rxq); 3999 4000 QLNX_INC_IERRORS(ifp); 4001 4002 return; 4003 } 4004 4005 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4006 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4007 " dropping incoming packet and reusing its buffer\n", 4008 fp->rss_id); 4009 4010 fp->err_rx_alloc_errors++; 4011 QLNX_INC_IQDROPS(ifp); 4012 4013 /* 4014 * Load the tpa mbuf into the rx ring and save the 4015 * posted mbuf 4016 */ 4017 4018 map = sw_rx_data->map; 4019 addr = sw_rx_data->dma_addr; 4020 4021 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 4022 4023 sw_rx_data->data = rxq->tpa_info[agg_index].rx_buf.data; 4024 sw_rx_data->dma_addr = rxq->tpa_info[agg_index].rx_buf.dma_addr; 4025 sw_rx_data->map = rxq->tpa_info[agg_index].rx_buf.map; 4026 4027 rxq->tpa_info[agg_index].rx_buf.data = mp; 4028 rxq->tpa_info[agg_index].rx_buf.dma_addr = addr; 4029 rxq->tpa_info[agg_index].rx_buf.map = map; 4030 4031 rx_bd = (struct eth_rx_bd *) 4032 ecore_chain_produce(&rxq->rx_bd_ring); 4033 4034 rx_bd->addr.hi = htole32(U64_HI(sw_rx_data->dma_addr)); 4035 rx_bd->addr.lo = htole32(U64_LO(sw_rx_data->dma_addr)); 4036 4037 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4038 BUS_DMASYNC_PREREAD); 4039 4040 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 4041 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4042 4043 
ecore_chain_consume(&rxq->rx_bd_ring); 4044 4045 /* Now reuse any buffers posted in ext_bd_len_list */ 4046 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4047 if (cqe->ext_bd_len_list[i] == 0) 4048 break; 4049 4050 qlnx_reuse_rx_data(rxq); 4051 } 4052 4053 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4054 return; 4055 } 4056 4057 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4058 QL_DPRINT7(ha, "[%d]: invalid aggregation state," 4059 " dropping incoming packet and reusing its buffer\n", 4060 fp->rss_id); 4061 4062 QLNX_INC_IQDROPS(ifp); 4063 4064 /* if we already have mbuf head in aggregation free it */ 4065 if (rxq->tpa_info[agg_index].mpf) { 4066 m_freem(rxq->tpa_info[agg_index].mpf); 4067 rxq->tpa_info[agg_index].mpl = NULL; 4068 } 4069 rxq->tpa_info[agg_index].mpf = mp; 4070 rxq->tpa_info[agg_index].mpl = NULL; 4071 4072 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4073 ecore_chain_consume(&rxq->rx_bd_ring); 4074 4075 /* Now reuse any buffers posted in ext_bd_len_list */ 4076 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4077 if (cqe->ext_bd_len_list[i] == 0) 4078 break; 4079 4080 qlnx_reuse_rx_data(rxq); 4081 } 4082 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_ERROR; 4083 4084 return; 4085 } 4086 4087 /* 4088 * first process the ext_bd_len_list 4089 * if this fails then we simply drop the packet 4090 */ 4091 ecore_chain_consume(&rxq->rx_bd_ring); 4092 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4093 4094 for (i = 0; i < ETH_TPA_CQE_START_LEN_LIST_SIZE; i++) { 4095 QL_DPRINT7(ha, "[%d]: 4\n ", fp->rss_id); 4096 4097 if (cqe->ext_bd_len_list[i] == 0) 4098 break; 4099 4100 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4101 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4102 BUS_DMASYNC_POSTREAD); 4103 4104 mpc = sw_rx_data->data; 4105 4106 if (mpc == NULL) { 4107 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4108 fp->err_rx_mp_null++; 4109 if (mpf != NULL) 4110 m_freem(mpf); 4111 mpf = mpl = NULL; 4112 rxq->tpa_info[agg_index].agg_state = 4113 QLNX_AGG_STATE_ERROR; 4114 ecore_chain_consume(&rxq->rx_bd_ring); 4115 rxq->sw_rx_cons = 4116 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4117 continue; 4118 } 4119 4120 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4121 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4122 " dropping incoming packet and reusing its" 4123 " buffer\n", fp->rss_id); 4124 4125 qlnx_reuse_rx_data(rxq); 4126 4127 if (mpf != NULL) 4128 m_freem(mpf); 4129 mpf = mpl = NULL; 4130 4131 rxq->tpa_info[agg_index].agg_state = 4132 QLNX_AGG_STATE_ERROR; 4133 4134 ecore_chain_consume(&rxq->rx_bd_ring); 4135 rxq->sw_rx_cons = 4136 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4137 4138 continue; 4139 } 4140 4141 mpc->m_flags &= ~M_PKTHDR; 4142 mpc->m_next = NULL; 4143 mpc->m_len = cqe->ext_bd_len_list[i]; 4144 4145 if (mpf == NULL) { 4146 mpf = mpl = mpc; 4147 } else { 4148 mpl->m_len = ha->rx_buf_size; 4149 mpl->m_next = mpc; 4150 mpl = mpc; 4151 } 4152 4153 ecore_chain_consume(&rxq->rx_bd_ring); 4154 rxq->sw_rx_cons = 4155 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4156 } 4157 4158 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_NONE) { 4159 QL_DPRINT7(ha, "[%d]: invalid aggregation state, dropping" 4160 " incoming packet and reusing its buffer\n", 4161 fp->rss_id); 4162 4163 QLNX_INC_IQDROPS(ifp); 4164 4165 rxq->tpa_info[agg_index].mpf = mp; 4166 rxq->tpa_info[agg_index].mpl = NULL; 4167 4168 return; 4169 } 4170 4171 rxq->tpa_info[agg_index].placement_offset = 
cqe->placement_offset; 4172 4173 if (mpf != NULL) { 4174 mp->m_len = ha->rx_buf_size; 4175 mp->m_next = mpf; 4176 rxq->tpa_info[agg_index].mpf = mp; 4177 rxq->tpa_info[agg_index].mpl = mpl; 4178 } else { 4179 mp->m_len = cqe->len_on_first_bd + cqe->placement_offset; 4180 rxq->tpa_info[agg_index].mpf = mp; 4181 rxq->tpa_info[agg_index].mpl = mp; 4182 mp->m_next = NULL; 4183 } 4184 4185 mp->m_flags |= M_PKTHDR; 4186 4187 /* assign packet to this interface interface */ 4188 mp->m_pkthdr.rcvif = ifp; 4189 4190 /* assume no hardware checksum has complated */ 4191 mp->m_pkthdr.csum_flags = 0; 4192 4193 //mp->m_pkthdr.flowid = fp->rss_id; 4194 mp->m_pkthdr.flowid = cqe->rss_hash; 4195 4196 hash_type = cqe->bitfields & 4197 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4198 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4199 4200 switch (hash_type) { 4201 case RSS_HASH_TYPE_IPV4: 4202 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4203 break; 4204 4205 case RSS_HASH_TYPE_TCP_IPV4: 4206 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4207 break; 4208 4209 case RSS_HASH_TYPE_IPV6: 4210 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4211 break; 4212 4213 case RSS_HASH_TYPE_TCP_IPV6: 4214 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4215 break; 4216 4217 default: 4218 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4219 break; 4220 } 4221 4222 mp->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | 4223 CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4224 4225 mp->m_pkthdr.csum_data = 0xFFFF; 4226 4227 if (CQE_HAS_VLAN(cqe->pars_flags.flags)) { 4228 mp->m_pkthdr.ether_vtag = le16toh(cqe->vlan_tag); 4229 mp->m_flags |= M_VLANTAG; 4230 } 4231 4232 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_START; 4233 4234 QL_DPRINT7(ha, "[%d]: 5\n\tagg_state = %d\n\t mpf = %p mpl = %p\n", 4235 fp->rss_id, rxq->tpa_info[agg_index].agg_state, 4236 rxq->tpa_info[agg_index].mpf, rxq->tpa_info[agg_index].mpl); 4237 4238 return; 4239 } 4240 4241 static void 4242 qlnx_tpa_cont(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4243 struct qlnx_rx_queue *rxq, 4244 struct eth_fast_path_rx_tpa_cont_cqe *cqe) 4245 { 4246 struct sw_rx_data *sw_rx_data; 4247 int i; 4248 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4249 struct mbuf *mp; 4250 uint32_t agg_index; 4251 4252 QL_DPRINT7(ha, "[%d]: enter\n \ 4253 \t type = 0x%x\n \ 4254 \t tpa_agg_index = 0x%x\n \ 4255 \t len_list[0] = 0x%x\n \ 4256 \t len_list[1] = 0x%x\n \ 4257 \t len_list[2] = 0x%x\n \ 4258 \t len_list[3] = 0x%x\n \ 4259 \t len_list[4] = 0x%x\n \ 4260 \t len_list[5] = 0x%x\n", 4261 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4262 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4263 cqe->len_list[3], cqe->len_list[4], cqe->len_list[5]); 4264 4265 agg_index = cqe->tpa_agg_index; 4266 4267 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4268 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4269 fp->err_rx_tpa_invalid_agg_num++; 4270 return; 4271 } 4272 4273 for (i = 0; i < ETH_TPA_CQE_CONT_LEN_LIST_SIZE; i++) { 4274 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4275 4276 if (cqe->len_list[i] == 0) 4277 break; 4278 4279 if (rxq->tpa_info[agg_index].agg_state != 4280 QLNX_AGG_STATE_START) { 4281 qlnx_reuse_rx_data(rxq); 4282 continue; 4283 } 4284 4285 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4286 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4287 BUS_DMASYNC_POSTREAD); 4288 4289 mpc = sw_rx_data->data; 4290 4291 if (mpc == NULL) { 4292 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4293 4294 fp->err_rx_mp_null++; 4295 if (mpf != NULL) 4296 m_freem(mpf); 4297 mpf = mpl = NULL; 4298 
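/* mark the aggregation as failed; qlnx_tpa_end() will discard it */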
rxq->tpa_info[agg_index].agg_state = 4299 QLNX_AGG_STATE_ERROR; 4300 ecore_chain_consume(&rxq->rx_bd_ring); 4301 rxq->sw_rx_cons = 4302 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4303 continue; 4304 } 4305 4306 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4307 QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4308 " dropping incoming packet and reusing its" 4309 " buffer\n", fp->rss_id); 4310 4311 qlnx_reuse_rx_data(rxq); 4312 4313 if (mpf != NULL) 4314 m_freem(mpf); 4315 mpf = mpl = NULL; 4316 4317 rxq->tpa_info[agg_index].agg_state = 4318 QLNX_AGG_STATE_ERROR; 4319 4320 ecore_chain_consume(&rxq->rx_bd_ring); 4321 rxq->sw_rx_cons = 4322 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4323 4324 continue; 4325 } 4326 4327 mpc->m_flags &= ~M_PKTHDR; 4328 mpc->m_next = NULL; 4329 mpc->m_len = cqe->len_list[i]; 4330 4331 if (mpf == NULL) { 4332 mpf = mpl = mpc; 4333 } else { 4334 mpl->m_len = ha->rx_buf_size; 4335 mpl->m_next = mpc; 4336 mpl = mpc; 4337 } 4338 4339 ecore_chain_consume(&rxq->rx_bd_ring); 4340 rxq->sw_rx_cons = 4341 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4342 } 4343 4344 QL_DPRINT7(ha, "[%d]: 2\n" "\tmpf = %p mpl = %p\n", 4345 fp->rss_id, mpf, mpl); 4346 4347 if (mpf != NULL) { 4348 mp = rxq->tpa_info[agg_index].mpl; 4349 mp->m_len = ha->rx_buf_size; 4350 mp->m_next = mpf; 4351 rxq->tpa_info[agg_index].mpl = mpl; 4352 } 4353 4354 return; 4355 } 4356 4357 static int 4358 qlnx_tpa_end(qlnx_host_t *ha, struct qlnx_fastpath *fp, 4359 struct qlnx_rx_queue *rxq, 4360 struct eth_fast_path_rx_tpa_end_cqe *cqe) 4361 { 4362 struct sw_rx_data *sw_rx_data; 4363 int i; 4364 struct mbuf *mpf = NULL, *mpl = NULL, *mpc = NULL; 4365 struct mbuf *mp; 4366 uint32_t agg_index; 4367 uint32_t len = 0; 4368 if_t ifp = ha->ifp; 4369 4370 QL_DPRINT7(ha, "[%d]: enter\n \ 4371 \t type = 0x%x\n \ 4372 \t tpa_agg_index = 0x%x\n \ 4373 \t total_packet_len = 0x%x\n \ 4374 \t num_of_bds = 0x%x\n \ 4375 \t end_reason = 0x%x\n \ 4376 \t num_of_coalesced_segs = 0x%x\n \ 4377 \t ts_delta = 0x%x\n \ 4378 \t len_list[0] = 0x%x\n \ 4379 \t len_list[1] = 0x%x\n \ 4380 \t len_list[2] = 0x%x\n \ 4381 \t len_list[3] = 0x%x\n", 4382 fp->rss_id, cqe->type, cqe->tpa_agg_index, 4383 cqe->total_packet_len, cqe->num_of_bds, 4384 cqe->end_reason, cqe->num_of_coalesced_segs, cqe->ts_delta, 4385 cqe->len_list[0], cqe->len_list[1], cqe->len_list[2], 4386 cqe->len_list[3]); 4387 4388 agg_index = cqe->tpa_agg_index; 4389 4390 if (agg_index >= ETH_TPA_MAX_AGGS_NUM) { 4391 QL_DPRINT7(ha, "[%d]: 0\n ", fp->rss_id); 4392 4393 fp->err_rx_tpa_invalid_agg_num++; 4394 return (0); 4395 } 4396 4397 for (i = 0; i < ETH_TPA_CQE_END_LEN_LIST_SIZE; i++) { 4398 QL_DPRINT7(ha, "[%d]: 1\n ", fp->rss_id); 4399 4400 if (cqe->len_list[i] == 0) 4401 break; 4402 4403 if (rxq->tpa_info[agg_index].agg_state != 4404 QLNX_AGG_STATE_START) { 4405 QL_DPRINT7(ha, "[%d]: 2\n ", fp->rss_id); 4406 4407 qlnx_reuse_rx_data(rxq); 4408 continue; 4409 } 4410 4411 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4412 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4413 BUS_DMASYNC_POSTREAD); 4414 4415 mpc = sw_rx_data->data; 4416 4417 if (mpc == NULL) { 4418 QL_DPRINT7(ha, "[%d]: mpc = NULL\n", fp->rss_id); 4419 4420 fp->err_rx_mp_null++; 4421 if (mpf != NULL) 4422 m_freem(mpf); 4423 mpf = mpl = NULL; 4424 rxq->tpa_info[agg_index].agg_state = 4425 QLNX_AGG_STATE_ERROR; 4426 ecore_chain_consume(&rxq->rx_bd_ring); 4427 rxq->sw_rx_cons = 4428 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4429 continue; 4430 } 4431 4432 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4433 
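/* no replacement buffer could be posted; recycle the consumed BD and abort this aggregation */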
QL_DPRINT7(ha, "[%d]: New buffer allocation failed," 4434 " dropping incoming packet and reusing its" 4435 " buffer\n", fp->rss_id); 4436 4437 qlnx_reuse_rx_data(rxq); 4438 4439 if (mpf != NULL) 4440 m_freem(mpf); 4441 mpf = mpl = NULL; 4442 4443 rxq->tpa_info[agg_index].agg_state = 4444 QLNX_AGG_STATE_ERROR; 4445 4446 ecore_chain_consume(&rxq->rx_bd_ring); 4447 rxq->sw_rx_cons = 4448 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4449 4450 continue; 4451 } 4452 4453 mpc->m_flags &= ~M_PKTHDR; 4454 mpc->m_next = NULL; 4455 mpc->m_len = cqe->len_list[i]; 4456 4457 if (mpf == NULL) { 4458 mpf = mpl = mpc; 4459 } else { 4460 mpl->m_len = ha->rx_buf_size; 4461 mpl->m_next = mpc; 4462 mpl = mpc; 4463 } 4464 4465 ecore_chain_consume(&rxq->rx_bd_ring); 4466 rxq->sw_rx_cons = 4467 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4468 } 4469 4470 QL_DPRINT7(ha, "[%d]: 5\n ", fp->rss_id); 4471 4472 if (mpf != NULL) { 4473 QL_DPRINT7(ha, "[%d]: 6\n ", fp->rss_id); 4474 4475 mp = rxq->tpa_info[agg_index].mpl; 4476 mp->m_len = ha->rx_buf_size; 4477 mp->m_next = mpf; 4478 } 4479 4480 if (rxq->tpa_info[agg_index].agg_state != QLNX_AGG_STATE_START) { 4481 QL_DPRINT7(ha, "[%d]: 7\n ", fp->rss_id); 4482 4483 if (rxq->tpa_info[agg_index].mpf != NULL) 4484 m_freem(rxq->tpa_info[agg_index].mpf); 4485 rxq->tpa_info[agg_index].mpf = NULL; 4486 rxq->tpa_info[agg_index].mpl = NULL; 4487 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4488 return (0); 4489 } 4490 4491 mp = rxq->tpa_info[agg_index].mpf; 4492 m_adj(mp, rxq->tpa_info[agg_index].placement_offset); 4493 mp->m_pkthdr.len = cqe->total_packet_len; 4494 4495 if (mp->m_next == NULL) 4496 mp->m_len = mp->m_pkthdr.len; 4497 else { 4498 /* compute the total packet length */ 4499 mpf = mp; 4500 while (mpf != NULL) { 4501 len += mpf->m_len; 4502 mpf = mpf->m_next; 4503 } 4504 4505 if (cqe->total_packet_len > len) { 4506 mpl = rxq->tpa_info[agg_index].mpl; 4507 mpl->m_len += (cqe->total_packet_len - len); 4508 } 4509 } 4510 4511 QLNX_INC_IPACKETS(ifp); 4512 QLNX_INC_IBYTES(ifp, (cqe->total_packet_len)); 4513 4514 QL_DPRINT7(ha, "[%d]: 8 csum_data = 0x%x csum_flags = 0x%" PRIu64 "\n \ 4515 m_len = 0x%x m_pkthdr_len = 0x%x\n", 4516 fp->rss_id, mp->m_pkthdr.csum_data, 4517 (uint64_t)mp->m_pkthdr.csum_flags, mp->m_len, mp->m_pkthdr.len); 4518 4519 if_input(ifp, mp); 4520 4521 rxq->tpa_info[agg_index].mpf = NULL; 4522 rxq->tpa_info[agg_index].mpl = NULL; 4523 rxq->tpa_info[agg_index].agg_state = QLNX_AGG_STATE_NONE; 4524 4525 return (cqe->num_of_coalesced_segs); 4526 } 4527 4528 static int 4529 qlnx_rx_int(qlnx_host_t *ha, struct qlnx_fastpath *fp, int budget, 4530 int lro_enable) 4531 { 4532 uint16_t hw_comp_cons, sw_comp_cons; 4533 int rx_pkt = 0; 4534 struct qlnx_rx_queue *rxq = fp->rxq; 4535 if_t ifp = ha->ifp; 4536 struct ecore_dev *cdev = &ha->cdev; 4537 struct ecore_hwfn *p_hwfn; 4538 4539 #ifdef QLNX_SOFT_LRO 4540 struct lro_ctrl *lro; 4541 4542 lro = &rxq->lro; 4543 #endif /* #ifdef QLNX_SOFT_LRO */ 4544 4545 hw_comp_cons = le16toh(*rxq->hw_cons_ptr); 4546 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4547 4548 p_hwfn = &ha->cdev.hwfns[(fp->rss_id % cdev->num_hwfns)]; 4549 4550 /* Memory barrier to prevent the CPU from doing speculative reads of CQE 4551 * / BD in the while-loop before reading hw_comp_cons. If the CQE is 4552 * read before it is written by FW, then FW writes CQE and SB, and then 4553 * the CPU reads the hw_comp_cons, it will use an old CQE. 
4554 */ 4555 4556 /* Loop to complete all indicated BDs */ 4557 while (sw_comp_cons != hw_comp_cons) { 4558 union eth_rx_cqe *cqe; 4559 struct eth_fast_path_rx_reg_cqe *fp_cqe; 4560 struct sw_rx_data *sw_rx_data; 4561 register struct mbuf *mp; 4562 enum eth_rx_cqe_type cqe_type; 4563 uint16_t len, pad, len_on_first_bd; 4564 uint8_t *data; 4565 uint8_t hash_type; 4566 4567 /* Get the CQE from the completion ring */ 4568 cqe = (union eth_rx_cqe *) 4569 ecore_chain_consume(&rxq->rx_comp_ring); 4570 cqe_type = cqe->fast_path_regular.type; 4571 4572 if (cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH) { 4573 QL_DPRINT3(ha, "Got a slowpath CQE\n"); 4574 4575 ecore_eth_cqe_completion(p_hwfn, 4576 (struct eth_slow_path_rx_cqe *)cqe); 4577 goto next_cqe; 4578 } 4579 4580 if (cqe_type != ETH_RX_CQE_TYPE_REGULAR) { 4581 switch (cqe_type) { 4582 case ETH_RX_CQE_TYPE_TPA_START: 4583 qlnx_tpa_start(ha, fp, rxq, 4584 &cqe->fast_path_tpa_start); 4585 fp->tpa_start++; 4586 break; 4587 4588 case ETH_RX_CQE_TYPE_TPA_CONT: 4589 qlnx_tpa_cont(ha, fp, rxq, 4590 &cqe->fast_path_tpa_cont); 4591 fp->tpa_cont++; 4592 break; 4593 4594 case ETH_RX_CQE_TYPE_TPA_END: 4595 rx_pkt += qlnx_tpa_end(ha, fp, rxq, 4596 &cqe->fast_path_tpa_end); 4597 fp->tpa_end++; 4598 break; 4599 4600 default: 4601 break; 4602 } 4603 4604 goto next_cqe; 4605 } 4606 4607 /* Get the data from the SW ring */ 4608 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_cons]; 4609 mp = sw_rx_data->data; 4610 4611 if (mp == NULL) { 4612 QL_DPRINT1(ha, "mp = NULL\n"); 4613 fp->err_rx_mp_null++; 4614 rxq->sw_rx_cons = 4615 (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4616 goto next_cqe; 4617 } 4618 bus_dmamap_sync(ha->rx_tag, sw_rx_data->map, 4619 BUS_DMASYNC_POSTREAD); 4620 4621 /* non GRO */ 4622 fp_cqe = &cqe->fast_path_regular;/* MK CR TPA check assembly */ 4623 len = le16toh(fp_cqe->pkt_len); 4624 pad = fp_cqe->placement_offset; 4625 #if 0 4626 QL_DPRINT3(ha, "CQE type = %x, flags = %x, vlan = %x," 4627 " len %u, parsing flags = %d pad = %d\n", 4628 cqe_type, fp_cqe->bitfields, 4629 le16toh(fp_cqe->vlan_tag), 4630 len, le16toh(fp_cqe->pars_flags.flags), pad); 4631 #endif 4632 data = mtod(mp, uint8_t *); 4633 data = data + pad; 4634 4635 if (0) 4636 qlnx_dump_buf8(ha, __func__, data, len); 4637 4638 /* For every Rx BD consumed, we allocate a new BD so the BD ring 4639 * always has a fixed size. If allocation fails, we take the 4640 * consumed BD and return it to the ring in the PROD position. 4641 * The packet that was received on that BD will be dropped (and 4642 * not passed to the upper stack.
4643 */ 4644 /* If this is an error packet then drop it */ 4645 if ((le16toh(cqe->fast_path_regular.pars_flags.flags)) & 4646 CQE_FLAGS_ERR) { 4647 QL_DPRINT1(ha, "CQE in CONS = %u has error, flags = %x," 4648 " dropping incoming packet\n", sw_comp_cons, 4649 le16toh(cqe->fast_path_regular.pars_flags.flags)); 4650 fp->err_rx_hw_errors++; 4651 4652 qlnx_reuse_rx_data(rxq); 4653 4654 QLNX_INC_IERRORS(ifp); 4655 4656 goto next_cqe; 4657 } 4658 4659 if (qlnx_alloc_rx_buffer(ha, rxq) != 0) { 4660 QL_DPRINT1(ha, "New buffer allocation failed, dropping" 4661 " incoming packet and reusing its buffer\n"); 4662 qlnx_reuse_rx_data(rxq); 4663 4664 fp->err_rx_alloc_errors++; 4665 4666 QLNX_INC_IQDROPS(ifp); 4667 4668 goto next_cqe; 4669 } 4670 4671 ecore_chain_consume(&rxq->rx_bd_ring); 4672 4673 len_on_first_bd = fp_cqe->len_on_first_bd; 4674 m_adj(mp, pad); 4675 mp->m_pkthdr.len = len; 4676 4677 if ((len > 60 ) && (len > len_on_first_bd)) { 4678 mp->m_len = len_on_first_bd; 4679 4680 if (qlnx_rx_jumbo_chain(ha, fp, mp, 4681 (len - len_on_first_bd)) != 0) { 4682 m_freem(mp); 4683 4684 QLNX_INC_IQDROPS(ifp); 4685 4686 goto next_cqe; 4687 } 4688 4689 } else if (len_on_first_bd < len) { 4690 fp->err_rx_jumbo_chain_pkts++; 4691 } else { 4692 mp->m_len = len; 4693 } 4694 4695 mp->m_flags |= M_PKTHDR; 4696 4697 /* assign packet to this interface interface */ 4698 mp->m_pkthdr.rcvif = ifp; 4699 4700 /* assume no hardware checksum has complated */ 4701 mp->m_pkthdr.csum_flags = 0; 4702 4703 mp->m_pkthdr.flowid = fp_cqe->rss_hash; 4704 4705 hash_type = fp_cqe->bitfields & 4706 (ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK << 4707 ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT); 4708 4709 switch (hash_type) { 4710 case RSS_HASH_TYPE_IPV4: 4711 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV4); 4712 break; 4713 4714 case RSS_HASH_TYPE_TCP_IPV4: 4715 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV4); 4716 break; 4717 4718 case RSS_HASH_TYPE_IPV6: 4719 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_IPV6); 4720 break; 4721 4722 case RSS_HASH_TYPE_TCP_IPV6: 4723 M_HASHTYPE_SET(mp, M_HASHTYPE_RSS_TCP_IPV6); 4724 break; 4725 4726 default: 4727 M_HASHTYPE_SET(mp, M_HASHTYPE_OPAQUE); 4728 break; 4729 } 4730 4731 if (CQE_L3_PACKET(fp_cqe->pars_flags.flags)) { 4732 mp->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; 4733 } 4734 4735 if (!(CQE_IP_HDR_ERR(fp_cqe->pars_flags.flags))) { 4736 mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; 4737 } 4738 4739 if (CQE_L4_HAS_CSUM(fp_cqe->pars_flags.flags)) { 4740 mp->m_pkthdr.csum_data = 0xFFFF; 4741 mp->m_pkthdr.csum_flags |= 4742 (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 4743 } 4744 4745 if (CQE_HAS_VLAN(fp_cqe->pars_flags.flags)) { 4746 mp->m_pkthdr.ether_vtag = le16toh(fp_cqe->vlan_tag); 4747 mp->m_flags |= M_VLANTAG; 4748 } 4749 4750 QLNX_INC_IPACKETS(ifp); 4751 QLNX_INC_IBYTES(ifp, len); 4752 4753 #ifdef QLNX_SOFT_LRO 4754 if (lro_enable) 4755 tcp_lro_queue_mbuf(lro, mp); 4756 else 4757 if_input(ifp, mp); 4758 #else 4759 4760 if_input(ifp, mp); 4761 4762 #endif /* #ifdef QLNX_SOFT_LRO */ 4763 4764 rx_pkt++; 4765 4766 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 4767 4768 next_cqe: /* don't consume bd rx buffer */ 4769 ecore_chain_recycle_consumed(&rxq->rx_comp_ring); 4770 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); 4771 4772 /* CR TPA - revisit how to handle budget in TPA perhaps 4773 increase on "end" */ 4774 if (rx_pkt == budget) 4775 break; 4776 } /* repeat while sw_comp_cons != hw_comp_cons... 
*/ 4777 4778 /* Update producers */ 4779 qlnx_update_rx_prod(p_hwfn, rxq); 4780 4781 return rx_pkt; 4782 } 4783 4784 /* 4785 * fast path interrupt 4786 */ 4787 4788 static void 4789 qlnx_fp_isr(void *arg) 4790 { 4791 qlnx_ivec_t *ivec = arg; 4792 qlnx_host_t *ha; 4793 struct qlnx_fastpath *fp = NULL; 4794 int idx; 4795 4796 ha = ivec->ha; 4797 4798 if (ha->state != QLNX_STATE_OPEN) { 4799 return; 4800 } 4801 4802 idx = ivec->rss_idx; 4803 4804 if ((idx = ivec->rss_idx) >= ha->num_rss) { 4805 QL_DPRINT1(ha, "illegal interrupt[%d]\n", idx); 4806 ha->err_illegal_intr++; 4807 return; 4808 } 4809 fp = &ha->fp_array[idx]; 4810 4811 if (fp == NULL) { 4812 ha->err_fp_null++; 4813 } else { 4814 int rx_int = 0; 4815 #ifdef QLNX_SOFT_LRO 4816 int total_rx_count = 0; 4817 #endif 4818 int lro_enable, tc; 4819 struct qlnx_tx_queue *txq; 4820 uint16_t elem_left; 4821 4822 lro_enable = if_getcapenable(ha->ifp) & IFCAP_LRO; 4823 4824 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); 4825 4826 do { 4827 for (tc = 0; tc < ha->num_tc; tc++) { 4828 txq = fp->txq[tc]; 4829 4830 if((int)(elem_left = 4831 ecore_chain_get_elem_left(&txq->tx_pbl)) < 4832 QLNX_TX_ELEM_THRESH) { 4833 if (mtx_trylock(&fp->tx_mtx)) { 4834 #ifdef QLNX_TRACE_PERF_DATA 4835 tx_compl = fp->tx_pkts_completed; 4836 #endif 4837 4838 qlnx_tx_int(ha, fp, fp->txq[tc]); 4839 #ifdef QLNX_TRACE_PERF_DATA 4840 fp->tx_pkts_compl_intr += 4841 (fp->tx_pkts_completed - tx_compl); 4842 if ((fp->tx_pkts_completed - tx_compl) <= 32) 4843 fp->tx_comInt[0]++; 4844 else if (((fp->tx_pkts_completed - tx_compl) > 32) && 4845 ((fp->tx_pkts_completed - tx_compl) <= 64)) 4846 fp->tx_comInt[1]++; 4847 else if(((fp->tx_pkts_completed - tx_compl) > 64) && 4848 ((fp->tx_pkts_completed - tx_compl) <= 128)) 4849 fp->tx_comInt[2]++; 4850 else if(((fp->tx_pkts_completed - tx_compl) > 128)) 4851 fp->tx_comInt[3]++; 4852 #endif 4853 mtx_unlock(&fp->tx_mtx); 4854 } 4855 } 4856 } 4857 4858 rx_int = qlnx_rx_int(ha, fp, ha->rx_pkt_threshold, 4859 lro_enable); 4860 4861 if (rx_int) { 4862 fp->rx_pkts += rx_int; 4863 #ifdef QLNX_SOFT_LRO 4864 total_rx_count += rx_int; 4865 #endif 4866 } 4867 4868 } while (rx_int); 4869 4870 #ifdef QLNX_SOFT_LRO 4871 { 4872 struct lro_ctrl *lro; 4873 4874 lro = &fp->rxq->lro; 4875 4876 if (lro_enable && total_rx_count) { 4877 4878 #ifdef QLNX_TRACE_LRO_CNT 4879 if (lro->lro_mbuf_count & ~1023) 4880 fp->lro_cnt_1024++; 4881 else if (lro->lro_mbuf_count & ~511) 4882 fp->lro_cnt_512++; 4883 else if (lro->lro_mbuf_count & ~255) 4884 fp->lro_cnt_256++; 4885 else if (lro->lro_mbuf_count & ~127) 4886 fp->lro_cnt_128++; 4887 else if (lro->lro_mbuf_count & ~63) 4888 fp->lro_cnt_64++; 4889 #endif /* #ifdef QLNX_TRACE_LRO_CNT */ 4890 4891 tcp_lro_flush_all(lro); 4892 } 4893 } 4894 #endif /* #ifdef QLNX_SOFT_LRO */ 4895 4896 ecore_sb_update_sb_idx(fp->sb_info); 4897 rmb(); 4898 ecore_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); 4899 } 4900 4901 return; 4902 } 4903 4904 /* 4905 * slow path interrupt processing function 4906 * can be invoked in polled mode or in interrupt mode via taskqueue. 
4907 */ 4908 void 4909 qlnx_sp_isr(void *arg) 4910 { 4911 struct ecore_hwfn *p_hwfn; 4912 qlnx_host_t *ha; 4913 4914 p_hwfn = arg; 4915 4916 ha = (qlnx_host_t *)p_hwfn->p_dev; 4917 4918 ha->sp_interrupts++; 4919 4920 QL_DPRINT2(ha, "enter\n"); 4921 4922 ecore_int_sp_dpc(p_hwfn); 4923 4924 QL_DPRINT2(ha, "exit\n"); 4925 4926 return; 4927 } 4928 4929 /***************************************************************************** 4930 * Support Functions for DMA'able Memory 4931 *****************************************************************************/ 4932 4933 static void 4934 qlnx_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 4935 { 4936 *((bus_addr_t *)arg) = 0; 4937 4938 if (error) { 4939 printf("%s: bus_dmamap_load failed (%d)\n", __func__, error); 4940 return; 4941 } 4942 4943 *((bus_addr_t *)arg) = segs[0].ds_addr; 4944 4945 return; 4946 } 4947 4948 static int 4949 qlnx_alloc_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 4950 { 4951 int ret = 0; 4952 bus_addr_t b_addr; 4953 4954 ret = bus_dma_tag_create( 4955 ha->parent_tag,/* parent */ 4956 dma_buf->alignment, 4957 ((bus_size_t)(1ULL << 32)),/* boundary */ 4958 BUS_SPACE_MAXADDR, /* lowaddr */ 4959 BUS_SPACE_MAXADDR, /* highaddr */ 4960 NULL, NULL, /* filter, filterarg */ 4961 dma_buf->size, /* maxsize */ 4962 1, /* nsegments */ 4963 dma_buf->size, /* maxsegsize */ 4964 0, /* flags */ 4965 NULL, NULL, /* lockfunc, lockarg */ 4966 &dma_buf->dma_tag); 4967 4968 if (ret) { 4969 QL_DPRINT1(ha, "could not create dma tag\n"); 4970 goto qlnx_alloc_dmabuf_exit; 4971 } 4972 ret = bus_dmamem_alloc(dma_buf->dma_tag, 4973 (void **)&dma_buf->dma_b, 4974 (BUS_DMA_ZERO | BUS_DMA_COHERENT | BUS_DMA_NOWAIT), 4975 &dma_buf->dma_map); 4976 if (ret) { 4977 bus_dma_tag_destroy(dma_buf->dma_tag); 4978 QL_DPRINT1(ha, "bus_dmamem_alloc failed\n"); 4979 goto qlnx_alloc_dmabuf_exit; 4980 } 4981 4982 ret = bus_dmamap_load(dma_buf->dma_tag, 4983 dma_buf->dma_map, 4984 dma_buf->dma_b, 4985 dma_buf->size, 4986 qlnx_dmamap_callback, 4987 &b_addr, BUS_DMA_NOWAIT); 4988 4989 if (ret || !b_addr) { 4990 bus_dma_tag_destroy(dma_buf->dma_tag); 4991 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, 4992 dma_buf->dma_map); 4993 ret = -1; 4994 goto qlnx_alloc_dmabuf_exit; 4995 } 4996 4997 dma_buf->dma_addr = b_addr; 4998 4999 qlnx_alloc_dmabuf_exit: 5000 5001 return ret; 5002 } 5003 5004 static void 5005 qlnx_free_dmabuf(qlnx_host_t *ha, qlnx_dma_t *dma_buf) 5006 { 5007 bus_dmamap_unload(dma_buf->dma_tag, dma_buf->dma_map); 5008 bus_dmamem_free(dma_buf->dma_tag, dma_buf->dma_b, dma_buf->dma_map); 5009 bus_dma_tag_destroy(dma_buf->dma_tag); 5010 return; 5011 } 5012 5013 void * 5014 qlnx_dma_alloc_coherent(void *ecore_dev, bus_addr_t *phys, uint32_t size) 5015 { 5016 qlnx_dma_t dma_buf; 5017 qlnx_dma_t *dma_p; 5018 qlnx_host_t *ha __unused; 5019 5020 ha = (qlnx_host_t *)ecore_dev; 5021 5022 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5023 5024 memset(&dma_buf, 0, sizeof (qlnx_dma_t)); 5025 5026 dma_buf.size = size + PAGE_SIZE; 5027 dma_buf.alignment = 8; 5028 5029 if (qlnx_alloc_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf) != 0) 5030 return (NULL); 5031 bzero((uint8_t *)dma_buf.dma_b, dma_buf.size); 5032 5033 *phys = dma_buf.dma_addr; 5034 5035 dma_p = (qlnx_dma_t *)((uint8_t *)dma_buf.dma_b + size); 5036 5037 memcpy(dma_p, &dma_buf, sizeof(qlnx_dma_t)); 5038 5039 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5040 (void *)dma_buf.dma_map, (void *)dma_buf.dma_tag, 5041 dma_buf.dma_b, (void *)dma_buf.dma_addr, size); 5042 5043 return 
(dma_buf.dma_b); 5044 } 5045 5046 void 5047 qlnx_dma_free_coherent(void *ecore_dev, void *v_addr, bus_addr_t phys, 5048 uint32_t size) 5049 { 5050 qlnx_dma_t dma_buf, *dma_p; 5051 qlnx_host_t *ha; 5052 5053 ha = (qlnx_host_t *)ecore_dev; 5054 5055 if (v_addr == NULL) 5056 return; 5057 5058 size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 5059 5060 dma_p = (qlnx_dma_t *)((uint8_t *)v_addr + size); 5061 5062 QL_DPRINT5(ha, "[%p %p %p %p 0x%08x ]\n", 5063 (void *)dma_p->dma_map, (void *)dma_p->dma_tag, 5064 dma_p->dma_b, (void *)dma_p->dma_addr, size); 5065 5066 dma_buf = *dma_p; 5067 5068 if (!ha->qlnxr_debug) 5069 qlnx_free_dmabuf((qlnx_host_t *)ecore_dev, &dma_buf); 5070 return; 5071 } 5072 5073 static int 5074 qlnx_alloc_parent_dma_tag(qlnx_host_t *ha) 5075 { 5076 int ret; 5077 device_t dev; 5078 5079 dev = ha->pci_dev; 5080 5081 /* 5082 * Allocate parent DMA Tag 5083 */ 5084 ret = bus_dma_tag_create( 5085 bus_get_dma_tag(dev), /* parent */ 5086 1,((bus_size_t)(1ULL << 32)),/* alignment, boundary */ 5087 BUS_SPACE_MAXADDR, /* lowaddr */ 5088 BUS_SPACE_MAXADDR, /* highaddr */ 5089 NULL, NULL, /* filter, filterarg */ 5090 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 5091 0, /* nsegments */ 5092 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 5093 0, /* flags */ 5094 NULL, NULL, /* lockfunc, lockarg */ 5095 &ha->parent_tag); 5096 5097 if (ret) { 5098 QL_DPRINT1(ha, "could not create parent dma tag\n"); 5099 return (-1); 5100 } 5101 5102 ha->flags.parent_tag = 1; 5103 5104 return (0); 5105 } 5106 5107 static void 5108 qlnx_free_parent_dma_tag(qlnx_host_t *ha) 5109 { 5110 if (ha->parent_tag != NULL) { 5111 bus_dma_tag_destroy(ha->parent_tag); 5112 ha->parent_tag = NULL; 5113 } 5114 return; 5115 } 5116 5117 static int 5118 qlnx_alloc_tx_dma_tag(qlnx_host_t *ha) 5119 { 5120 if (bus_dma_tag_create(NULL, /* parent */ 5121 1, 0, /* alignment, bounds */ 5122 BUS_SPACE_MAXADDR, /* lowaddr */ 5123 BUS_SPACE_MAXADDR, /* highaddr */ 5124 NULL, NULL, /* filter, filterarg */ 5125 QLNX_MAX_TSO_FRAME_SIZE, /* maxsize */ 5126 QLNX_MAX_SEGMENTS, /* nsegments */ 5127 QLNX_MAX_TX_MBUF_SIZE, /* maxsegsize */ 5128 0, /* flags */ 5129 NULL, /* lockfunc */ 5130 NULL, /* lockfuncarg */ 5131 &ha->tx_tag)) { 5132 QL_DPRINT1(ha, "tx_tag alloc failed\n"); 5133 return (-1); 5134 } 5135 5136 return (0); 5137 } 5138 5139 static void 5140 qlnx_free_tx_dma_tag(qlnx_host_t *ha) 5141 { 5142 if (ha->tx_tag != NULL) { 5143 bus_dma_tag_destroy(ha->tx_tag); 5144 ha->tx_tag = NULL; 5145 } 5146 return; 5147 } 5148 5149 static int 5150 qlnx_alloc_rx_dma_tag(qlnx_host_t *ha) 5151 { 5152 if (bus_dma_tag_create(NULL, /* parent */ 5153 1, 0, /* alignment, bounds */ 5154 BUS_SPACE_MAXADDR, /* lowaddr */ 5155 BUS_SPACE_MAXADDR, /* highaddr */ 5156 NULL, NULL, /* filter, filterarg */ 5157 MJUM9BYTES, /* maxsize */ 5158 1, /* nsegments */ 5159 MJUM9BYTES, /* maxsegsize */ 5160 0, /* flags */ 5161 NULL, /* lockfunc */ 5162 NULL, /* lockfuncarg */ 5163 &ha->rx_tag)) { 5164 QL_DPRINT1(ha, " rx_tag alloc failed\n"); 5165 5166 return (-1); 5167 } 5168 return (0); 5169 } 5170 5171 static void 5172 qlnx_free_rx_dma_tag(qlnx_host_t *ha) 5173 { 5174 if (ha->rx_tag != NULL) { 5175 bus_dma_tag_destroy(ha->rx_tag); 5176 ha->rx_tag = NULL; 5177 } 5178 return; 5179 } 5180 5181 /********************************* 5182 * Exported functions 5183 *********************************/ 5184 uint32_t 5185 qlnx_pci_bus_get_bar_size(void *ecore_dev, uint8_t bar_id) 5186 { 5187 uint32_t bar_size; 5188 5189 bar_id = bar_id * 2; 5190 5191 bar_size = 
bus_get_resource_count(((qlnx_host_t *)ecore_dev)->pci_dev, 5192 SYS_RES_MEMORY, 5193 PCIR_BAR(bar_id)); 5194 5195 return (bar_size); 5196 } 5197 5198 uint32_t 5199 qlnx_pci_read_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t *reg_value) 5200 { 5201 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5202 pci_reg, 1); 5203 return 0; 5204 } 5205 5206 uint32_t 5207 qlnx_pci_read_config_word(void *ecore_dev, uint32_t pci_reg, 5208 uint16_t *reg_value) 5209 { 5210 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5211 pci_reg, 2); 5212 return 0; 5213 } 5214 5215 uint32_t 5216 qlnx_pci_read_config_dword(void *ecore_dev, uint32_t pci_reg, 5217 uint32_t *reg_value) 5218 { 5219 *reg_value = pci_read_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5220 pci_reg, 4); 5221 return 0; 5222 } 5223 5224 void 5225 qlnx_pci_write_config_byte(void *ecore_dev, uint32_t pci_reg, uint8_t reg_value) 5226 { 5227 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5228 pci_reg, reg_value, 1); 5229 return; 5230 } 5231 5232 void 5233 qlnx_pci_write_config_word(void *ecore_dev, uint32_t pci_reg, 5234 uint16_t reg_value) 5235 { 5236 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5237 pci_reg, reg_value, 2); 5238 return; 5239 } 5240 5241 void 5242 qlnx_pci_write_config_dword(void *ecore_dev, uint32_t pci_reg, 5243 uint32_t reg_value) 5244 { 5245 pci_write_config(((qlnx_host_t *)ecore_dev)->pci_dev, 5246 pci_reg, reg_value, 4); 5247 return; 5248 } 5249 5250 int 5251 qlnx_pci_find_capability(void *ecore_dev, int cap) 5252 { 5253 int reg; 5254 qlnx_host_t *ha; 5255 5256 ha = ecore_dev; 5257 5258 if (pci_find_cap(ha->pci_dev, PCIY_EXPRESS, &reg) == 0) 5259 return reg; 5260 else { 5261 QL_DPRINT1(ha, "failed\n"); 5262 return 0; 5263 } 5264 } 5265 5266 int 5267 qlnx_pci_find_ext_capability(void *ecore_dev, int ext_cap) 5268 { 5269 int reg; 5270 qlnx_host_t *ha; 5271 5272 ha = ecore_dev; 5273 5274 if (pci_find_extcap(ha->pci_dev, ext_cap, &reg) == 0) 5275 return reg; 5276 else { 5277 QL_DPRINT1(ha, "failed\n"); 5278 return 0; 5279 } 5280 } 5281 5282 uint32_t 5283 qlnx_reg_rd32(void *hwfn, uint32_t reg_addr) 5284 { 5285 uint32_t data32; 5286 struct ecore_hwfn *p_hwfn; 5287 5288 p_hwfn = hwfn; 5289 5290 data32 = bus_read_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5291 (bus_size_t)(p_hwfn->reg_offset + reg_addr)); 5292 5293 return (data32); 5294 } 5295 5296 void 5297 qlnx_reg_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5298 { 5299 struct ecore_hwfn *p_hwfn = hwfn; 5300 5301 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5302 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5303 5304 return; 5305 } 5306 5307 void 5308 qlnx_reg_wr16(void *hwfn, uint32_t reg_addr, uint16_t value) 5309 { 5310 struct ecore_hwfn *p_hwfn = hwfn; 5311 5312 bus_write_2(((qlnx_host_t *)p_hwfn->p_dev)->pci_reg, \ 5313 (bus_size_t)(p_hwfn->reg_offset + reg_addr), value); 5314 return; 5315 } 5316 5317 void 5318 qlnx_dbell_wr32_db(void *hwfn, void *reg_addr, uint32_t value) 5319 { 5320 struct ecore_dev *cdev; 5321 struct ecore_hwfn *p_hwfn; 5322 uint32_t offset; 5323 5324 p_hwfn = hwfn; 5325 5326 cdev = p_hwfn->p_dev; 5327 5328 offset = (uint32_t)((uint8_t *)reg_addr - (uint8_t *)(p_hwfn->doorbells)); 5329 bus_write_4(((qlnx_host_t *)cdev)->pci_dbells, offset, value); 5330 5331 return; 5332 } 5333 5334 void 5335 qlnx_dbell_wr32(void *hwfn, uint32_t reg_addr, uint32_t value) 5336 { 5337 struct ecore_hwfn *p_hwfn = hwfn; 5338 5339 bus_write_4(((qlnx_host_t *)p_hwfn->p_dev)->pci_dbells, \ 5340
(bus_size_t)(p_hwfn->db_offset + reg_addr), value); 5341 5342 return; 5343 } 5344 5345 uint32_t 5346 qlnx_direct_reg_rd32(void *p_hwfn, uint32_t *reg_addr) 5347 { 5348 uint32_t data32; 5349 bus_size_t offset; 5350 struct ecore_dev *cdev; 5351 5352 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5353 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5354 5355 data32 = bus_read_4(((qlnx_host_t *)cdev)->pci_reg, offset); 5356 5357 return (data32); 5358 } 5359 5360 void 5361 qlnx_direct_reg_wr32(void *p_hwfn, void *reg_addr, uint32_t value) 5362 { 5363 bus_size_t offset; 5364 struct ecore_dev *cdev; 5365 5366 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5367 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5368 5369 bus_write_4(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5370 5371 return; 5372 } 5373 5374 void 5375 qlnx_direct_reg_wr64(void *p_hwfn, void *reg_addr, uint64_t value) 5376 { 5377 bus_size_t offset; 5378 struct ecore_dev *cdev; 5379 5380 cdev = ((struct ecore_hwfn *)p_hwfn)->p_dev; 5381 offset = (bus_size_t)((uint8_t *)reg_addr - (uint8_t *)(cdev->regview)); 5382 5383 bus_write_8(((qlnx_host_t *)cdev)->pci_reg, offset, value); 5384 return; 5385 } 5386 5387 void * 5388 qlnx_zalloc(uint32_t size) 5389 { 5390 caddr_t va; 5391 5392 va = malloc((unsigned long)size, M_QLNXBUF, M_NOWAIT); 5393 bzero(va, size); 5394 return ((void *)va); 5395 } 5396 5397 void 5398 qlnx_barrier(void *p_hwfn) 5399 { 5400 qlnx_host_t *ha; 5401 5402 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5403 bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE); 5404 } 5405 5406 void 5407 qlnx_link_update(void *p_hwfn) 5408 { 5409 qlnx_host_t *ha; 5410 int prev_link_state; 5411 5412 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5413 5414 qlnx_fill_link(ha, p_hwfn, &ha->if_link); 5415 5416 prev_link_state = ha->link_up; 5417 ha->link_up = ha->if_link.link_up; 5418 5419 if (prev_link_state != ha->link_up) { 5420 if (ha->link_up) { 5421 if_link_state_change(ha->ifp, LINK_STATE_UP); 5422 } else { 5423 if_link_state_change(ha->ifp, LINK_STATE_DOWN); 5424 } 5425 } 5426 #ifndef QLNX_VF 5427 #ifdef CONFIG_ECORE_SRIOV 5428 5429 if (qlnx_vf_device(ha) != 0) { 5430 if (ha->sriov_initialized) 5431 qlnx_inform_vf_link_state(p_hwfn, ha); 5432 } 5433 5434 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 5435 #endif /* #ifdef QLNX_VF */ 5436 5437 return; 5438 } 5439 5440 static void 5441 __qlnx_osal_vf_fill_acquire_resc_req(struct ecore_hwfn *p_hwfn, 5442 struct ecore_vf_acquire_sw_info *p_sw_info) 5443 { 5444 p_sw_info->driver_version = (QLNX_VERSION_MAJOR << 24) | 5445 (QLNX_VERSION_MINOR << 16) | 5446 QLNX_VERSION_BUILD; 5447 p_sw_info->os_type = VFPF_ACQUIRE_OS_FREEBSD; 5448 5449 return; 5450 } 5451 5452 void 5453 qlnx_osal_vf_fill_acquire_resc_req(void *p_hwfn, void *p_resc_req, 5454 void *p_sw_info) 5455 { 5456 __qlnx_osal_vf_fill_acquire_resc_req(p_hwfn, p_sw_info); 5457 5458 return; 5459 } 5460 5461 void 5462 qlnx_fill_link(qlnx_host_t *ha, struct ecore_hwfn *hwfn, 5463 struct qlnx_link_output *if_link) 5464 { 5465 struct ecore_mcp_link_params link_params; 5466 struct ecore_mcp_link_state link_state; 5467 uint8_t p_change; 5468 struct ecore_ptt *p_ptt = NULL; 5469 5470 memset(if_link, 0, sizeof(*if_link)); 5471 memset(&link_params, 0, sizeof(struct ecore_mcp_link_params)); 5472 memset(&link_state, 0, sizeof(struct ecore_mcp_link_state)); 5473 5474 ha = (qlnx_host_t *)hwfn->p_dev; 5475 5476 /* Prepare source inputs */ 5477 /* we only deal with physical functions */ 
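/* a PF queries the MFW directly through a PTT; a VF reads its bulletin board instead */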
5478 if (qlnx_vf_device(ha) != 0) { 5479 p_ptt = ecore_ptt_acquire(hwfn); 5480 5481 if (p_ptt == NULL) { 5482 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5483 return; 5484 } 5485 5486 ecore_mcp_get_media_type(hwfn, p_ptt, &if_link->media_type); 5487 ecore_ptt_release(hwfn, p_ptt); 5488 5489 memcpy(&link_params, ecore_mcp_get_link_params(hwfn), 5490 sizeof(link_params)); 5491 memcpy(&link_state, ecore_mcp_get_link_state(hwfn), 5492 sizeof(link_state)); 5493 } else { 5494 ecore_mcp_get_media_type(hwfn, NULL, &if_link->media_type); 5495 ecore_vf_read_bulletin(hwfn, &p_change); 5496 ecore_vf_get_link_params(hwfn, &link_params); 5497 ecore_vf_get_link_state(hwfn, &link_state); 5498 } 5499 5500 /* Set the link parameters to pass to protocol driver */ 5501 if (link_state.link_up) { 5502 if_link->link_up = true; 5503 if_link->speed = link_state.speed; 5504 } 5505 5506 if_link->supported_caps = QLNX_LINK_CAP_FIBRE; 5507 5508 if (link_params.speed.autoneg) 5509 if_link->supported_caps |= QLNX_LINK_CAP_Autoneg; 5510 5511 if (link_params.pause.autoneg || 5512 (link_params.pause.forced_rx && link_params.pause.forced_tx)) 5513 if_link->supported_caps |= QLNX_LINK_CAP_Asym_Pause; 5514 5515 if (link_params.pause.autoneg || link_params.pause.forced_rx || 5516 link_params.pause.forced_tx) 5517 if_link->supported_caps |= QLNX_LINK_CAP_Pause; 5518 5519 if (link_params.speed.advertised_speeds & 5520 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) 5521 if_link->supported_caps |= QLNX_LINK_CAP_1000baseT_Half | 5522 QLNX_LINK_CAP_1000baseT_Full; 5523 5524 if (link_params.speed.advertised_speeds & 5525 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) 5526 if_link->supported_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5527 5528 if (link_params.speed.advertised_speeds & 5529 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) 5530 if_link->supported_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5531 5532 if (link_params.speed.advertised_speeds & 5533 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) 5534 if_link->supported_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5535 5536 if (link_params.speed.advertised_speeds & 5537 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G) 5538 if_link->supported_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5539 5540 if (link_params.speed.advertised_speeds & 5541 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) 5542 if_link->supported_caps |= QLNX_LINK_CAP_100000baseKR4_Full; 5543 5544 if_link->advertised_caps = if_link->supported_caps; 5545 5546 if_link->autoneg = link_params.speed.autoneg; 5547 if_link->duplex = QLNX_LINK_DUPLEX; 5548 5549 /* Link partner capabilities */ 5550 5551 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_HD) 5552 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Half; 5553 5554 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_1G_FD) 5555 if_link->link_partner_caps |= QLNX_LINK_CAP_1000baseT_Full; 5556 5557 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_10G) 5558 if_link->link_partner_caps |= QLNX_LINK_CAP_10000baseKR_Full; 5559 5560 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_25G) 5561 if_link->link_partner_caps |= QLNX_LINK_CAP_25000baseKR_Full; 5562 5563 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_40G) 5564 if_link->link_partner_caps |= QLNX_LINK_CAP_40000baseLR4_Full; 5565 5566 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_50G) 5567 if_link->link_partner_caps |= QLNX_LINK_CAP_50000baseKR2_Full; 5568 5569 if (link_state.partner_adv_speed & ECORE_LINK_PARTNER_SPEED_100G) 5570 if_link->link_partner_caps |= 
QLNX_LINK_CAP_100000baseKR4_Full; 5571 5572 if (link_state.an_complete) 5573 if_link->link_partner_caps |= QLNX_LINK_CAP_Autoneg; 5574 5575 if (link_state.partner_adv_pause) 5576 if_link->link_partner_caps |= QLNX_LINK_CAP_Pause; 5577 5578 if ((link_state.partner_adv_pause == 5579 ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE) || 5580 (link_state.partner_adv_pause == 5581 ECORE_LINK_PARTNER_BOTH_PAUSE)) 5582 if_link->link_partner_caps |= QLNX_LINK_CAP_Asym_Pause; 5583 5584 return; 5585 } 5586 5587 void 5588 qlnx_schedule_recovery(void *p_hwfn) 5589 { 5590 qlnx_host_t *ha; 5591 5592 ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev; 5593 5594 if (qlnx_vf_device(ha) != 0) { 5595 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 5596 } 5597 5598 return; 5599 } 5600 5601 static int 5602 qlnx_nic_setup(struct ecore_dev *cdev, struct ecore_pf_params *func_params) 5603 { 5604 int rc, i; 5605 5606 for (i = 0; i < cdev->num_hwfns; i++) { 5607 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 5608 p_hwfn->pf_params = *func_params; 5609 5610 #ifdef QLNX_ENABLE_IWARP 5611 if (qlnx_vf_device((qlnx_host_t *)cdev) != 0) { 5612 p_hwfn->using_ll2 = true; 5613 } 5614 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5615 } 5616 5617 rc = ecore_resc_alloc(cdev); 5618 if (rc) 5619 goto qlnx_nic_setup_exit; 5620 5621 ecore_resc_setup(cdev); 5622 5623 qlnx_nic_setup_exit: 5624 5625 return rc; 5626 } 5627 5628 static int 5629 qlnx_nic_start(struct ecore_dev *cdev) 5630 { 5631 int rc; 5632 struct ecore_hw_init_params params; 5633 5634 bzero(&params, sizeof (struct ecore_hw_init_params)); 5635 5636 params.p_tunn = NULL; 5637 params.b_hw_start = true; 5638 params.int_mode = cdev->int_mode; 5639 params.allow_npar_tx_switch = true; 5640 params.bin_fw_data = NULL; 5641 5642 rc = ecore_hw_init(cdev, &params); 5643 if (rc) { 5644 ecore_resc_free(cdev); 5645 return rc; 5646 } 5647 5648 return 0; 5649 } 5650 5651 static int 5652 qlnx_slowpath_start(qlnx_host_t *ha) 5653 { 5654 struct ecore_dev *cdev; 5655 struct ecore_pf_params pf_params; 5656 int rc; 5657 5658 memset(&pf_params, 0, sizeof(struct ecore_pf_params)); 5659 pf_params.eth_pf_params.num_cons = 5660 (ha->num_rss) * (ha->num_tc + 1); 5661 5662 #ifdef QLNX_ENABLE_IWARP 5663 if (qlnx_vf_device(ha) != 0) { 5664 if(ha->personality == ECORE_PCI_ETH_IWARP) { 5665 device_printf(ha->pci_dev, "setting parameters required by iWARP dev\n"); 5666 pf_params.rdma_pf_params.num_qps = 1024; 5667 pf_params.rdma_pf_params.num_srqs = 1024; 5668 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5669 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_IWARP; 5670 } else if(ha->personality == ECORE_PCI_ETH_ROCE) { 5671 device_printf(ha->pci_dev, "setting parameters required by RoCE dev\n"); 5672 pf_params.rdma_pf_params.num_qps = 8192; 5673 pf_params.rdma_pf_params.num_srqs = 8192; 5674 //pf_params.rdma_pf_params.min_dpis = 0; 5675 pf_params.rdma_pf_params.min_dpis = 8; 5676 pf_params.rdma_pf_params.roce_edpm_mode = 0; 5677 pf_params.rdma_pf_params.gl_pi = ECORE_ROCE_PROTOCOL_INDEX; 5678 pf_params.rdma_pf_params.rdma_protocol = ECORE_RDMA_PROTOCOL_ROCE; 5679 } 5680 } 5681 #endif /* #ifdef QLNX_ENABLE_IWARP */ 5682 5683 cdev = &ha->cdev; 5684 5685 rc = qlnx_nic_setup(cdev, &pf_params); 5686 if (rc) 5687 goto qlnx_slowpath_start_exit; 5688 5689 cdev->int_mode = ECORE_INT_MODE_MSIX; 5690 cdev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; 5691 5692 #ifdef QLNX_MAX_COALESCE 5693 cdev->rx_coalesce_usecs = 255; 5694 cdev->tx_coalesce_usecs = 255; 5695 #endif 5696 5697 rc = qlnx_nic_start(cdev);
5698 5699 ha->rx_coalesce_usecs = cdev->rx_coalesce_usecs; 5700 ha->tx_coalesce_usecs = cdev->tx_coalesce_usecs; 5701 5702 #ifdef QLNX_USER_LLDP 5703 (void)qlnx_set_lldp_tlvx(ha, NULL); 5704 #endif /* #ifdef QLNX_USER_LLDP */ 5705 5706 qlnx_slowpath_start_exit: 5707 5708 return (rc); 5709 } 5710 5711 static int 5712 qlnx_slowpath_stop(qlnx_host_t *ha) 5713 { 5714 struct ecore_dev *cdev; 5715 device_t dev = ha->pci_dev; 5716 int i; 5717 5718 cdev = &ha->cdev; 5719 5720 ecore_hw_stop(cdev); 5721 5722 for (i = 0; i < ha->cdev.num_hwfns; i++) { 5723 if (ha->sp_handle[i]) 5724 (void)bus_teardown_intr(dev, ha->sp_irq[i], 5725 ha->sp_handle[i]); 5726 5727 ha->sp_handle[i] = NULL; 5728 5729 if (ha->sp_irq[i]) 5730 (void) bus_release_resource(dev, SYS_RES_IRQ, 5731 ha->sp_irq_rid[i], ha->sp_irq[i]); 5732 ha->sp_irq[i] = NULL; 5733 } 5734 5735 ecore_resc_free(cdev); 5736 5737 return 0; 5738 } 5739 5740 static void 5741 qlnx_set_id(struct ecore_dev *cdev, char name[NAME_SIZE], 5742 char ver_str[VER_SIZE]) 5743 { 5744 int i; 5745 5746 memcpy(cdev->name, name, NAME_SIZE); 5747 5748 for_each_hwfn(cdev, i) { 5749 snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); 5750 } 5751 5752 cdev->drv_type = DRV_ID_DRV_TYPE_FREEBSD; 5753 5754 return ; 5755 } 5756 5757 void 5758 qlnx_get_protocol_stats(void *cdev, int proto_type, void *proto_stats) 5759 { 5760 enum ecore_mcp_protocol_type type; 5761 union ecore_mcp_protocol_stats *stats; 5762 struct ecore_eth_stats eth_stats; 5763 qlnx_host_t *ha; 5764 5765 ha = cdev; 5766 stats = proto_stats; 5767 type = proto_type; 5768 5769 switch (type) { 5770 case ECORE_MCP_LAN_STATS: 5771 ecore_get_vport_stats((struct ecore_dev *)cdev, &eth_stats); 5772 stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; 5773 stats->lan_stats.ucast_tx_pkts = eth_stats.common.tx_ucast_pkts; 5774 stats->lan_stats.fcs_err = -1; 5775 break; 5776 5777 default: 5778 ha->err_get_proto_invalid_type++; 5779 5780 QL_DPRINT1(ha, "invalid protocol type 0x%x\n", type); 5781 break; 5782 } 5783 return; 5784 } 5785 5786 static int 5787 qlnx_get_mfw_version(qlnx_host_t *ha, uint32_t *mfw_ver) 5788 { 5789 struct ecore_hwfn *p_hwfn; 5790 struct ecore_ptt *p_ptt; 5791 5792 p_hwfn = &ha->cdev.hwfns[0]; 5793 p_ptt = ecore_ptt_acquire(p_hwfn); 5794 5795 if (p_ptt == NULL) { 5796 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 5797 return (-1); 5798 } 5799 ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, mfw_ver, NULL); 5800 5801 ecore_ptt_release(p_hwfn, p_ptt); 5802 5803 return (0); 5804 } 5805 5806 static int 5807 qlnx_get_flash_size(qlnx_host_t *ha, uint32_t *flash_size) 5808 { 5809 struct ecore_hwfn *p_hwfn; 5810 struct ecore_ptt *p_ptt; 5811 5812 p_hwfn = &ha->cdev.hwfns[0]; 5813 p_ptt = ecore_ptt_acquire(p_hwfn); 5814 5815 if (p_ptt == NULL) { 5816 QL_DPRINT1(ha,"ecore_ptt_acquire failed\n"); 5817 return (-1); 5818 } 5819 ecore_mcp_get_flash_size(p_hwfn, p_ptt, flash_size); 5820 5821 ecore_ptt_release(p_hwfn, p_ptt); 5822 5823 return (0); 5824 } 5825 5826 static int 5827 qlnx_alloc_mem_arrays(qlnx_host_t *ha) 5828 { 5829 bzero(&ha->txq_array[0], (sizeof(struct qlnx_tx_queue) * QLNX_MAX_RSS)); 5830 bzero(&ha->rxq_array[0], (sizeof(struct qlnx_rx_queue) * QLNX_MAX_RSS)); 5831 bzero(&ha->sb_array[0], (sizeof(struct ecore_sb_info) * QLNX_MAX_RSS)); 5832 5833 return 0; 5834 } 5835 5836 static void 5837 qlnx_init_fp(qlnx_host_t *ha) 5838 { 5839 int rss_id, txq_array_index, tc; 5840 5841 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 5842 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 5843 5844 fp->rss_id
= rss_id; 5845 fp->edev = ha; 5846 fp->sb_info = &ha->sb_array[rss_id]; 5847 fp->rxq = &ha->rxq_array[rss_id]; 5848 fp->rxq->rxq_id = rss_id; 5849 5850 for (tc = 0; tc < ha->num_tc; tc++) { 5851 txq_array_index = tc * ha->num_rss + rss_id; 5852 fp->txq[tc] = &ha->txq_array[txq_array_index]; 5853 fp->txq[tc]->index = txq_array_index; 5854 } 5855 5856 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", qlnx_name_str, 5857 rss_id); 5858 5859 fp->tx_ring_full = 0; 5860 5861 /* reset all the statistics counters */ 5862 5863 fp->tx_pkts_processed = 0; 5864 fp->tx_pkts_freed = 0; 5865 fp->tx_pkts_transmitted = 0; 5866 fp->tx_pkts_completed = 0; 5867 5868 #ifdef QLNX_TRACE_PERF_DATA 5869 fp->tx_pkts_trans_ctx = 0; 5870 fp->tx_pkts_compl_ctx = 0; 5871 fp->tx_pkts_trans_fp = 0; 5872 fp->tx_pkts_compl_fp = 0; 5873 fp->tx_pkts_compl_intr = 0; 5874 #endif 5875 fp->tx_lso_wnd_min_len = 0; 5876 fp->tx_defrag = 0; 5877 fp->tx_nsegs_gt_elem_left = 0; 5878 fp->tx_tso_max_nsegs = 0; 5879 fp->tx_tso_min_nsegs = 0; 5880 fp->err_tx_nsegs_gt_elem_left = 0; 5881 fp->err_tx_dmamap_create = 0; 5882 fp->err_tx_defrag_dmamap_load = 0; 5883 fp->err_tx_non_tso_max_seg = 0; 5884 fp->err_tx_dmamap_load = 0; 5885 fp->err_tx_defrag = 0; 5886 fp->err_tx_free_pkt_null = 0; 5887 fp->err_tx_cons_idx_conflict = 0; 5888 5889 fp->rx_pkts = 0; 5890 fp->err_m_getcl = 0; 5891 fp->err_m_getjcl = 0; 5892 } 5893 return; 5894 } 5895 5896 void 5897 qlnx_free_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info) 5898 { 5899 struct ecore_dev *cdev; 5900 5901 cdev = &ha->cdev; 5902 5903 if (sb_info->sb_virt) { 5904 OSAL_DMA_FREE_COHERENT(cdev, ((void *)sb_info->sb_virt), 5905 (sb_info->sb_phys), (sizeof(*sb_info->sb_virt))); 5906 sb_info->sb_virt = NULL; 5907 } 5908 } 5909 5910 static int 5911 qlnx_sb_init(struct ecore_dev *cdev, struct ecore_sb_info *sb_info, 5912 void *sb_virt_addr, bus_addr_t sb_phy_addr, u16 sb_id) 5913 { 5914 struct ecore_hwfn *p_hwfn; 5915 int hwfn_index, rc; 5916 u16 rel_sb_id; 5917 5918 hwfn_index = sb_id % cdev->num_hwfns; 5919 p_hwfn = &cdev->hwfns[hwfn_index]; 5920 rel_sb_id = sb_id / cdev->num_hwfns; 5921 5922 QL_DPRINT2(((qlnx_host_t *)cdev), 5923 "hwfn_index = %d p_hwfn = %p sb_id = 0x%x rel_sb_id = 0x%x \ 5924 sb_info = %p sb_virt_addr = %p sb_phy_addr = %p\n", 5925 hwfn_index, p_hwfn, sb_id, rel_sb_id, sb_info, 5926 sb_virt_addr, (void *)sb_phy_addr); 5927 5928 rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, 5929 sb_virt_addr, sb_phy_addr, rel_sb_id); 5930 5931 return rc; 5932 } 5933 5934 /* This function allocates fast-path status block memory */ 5935 int 5936 qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info, u16 sb_id) 5937 { 5938 struct status_block_e4 *sb_virt; 5939 bus_addr_t sb_phys; 5940 int rc; 5941 uint32_t size; 5942 struct ecore_dev *cdev; 5943 5944 cdev = &ha->cdev; 5945 5946 size = sizeof(*sb_virt); 5947 sb_virt = OSAL_DMA_ALLOC_COHERENT(cdev, (&sb_phys), size); 5948 5949 if (!sb_virt) { 5950 QL_DPRINT1(ha, "Status block allocation failed\n"); 5951 return -ENOMEM; 5952 } 5953 5954 rc = qlnx_sb_init(cdev, sb_info, sb_virt, sb_phys, sb_id); 5955 if (rc) { 5956 OSAL_DMA_FREE_COHERENT(cdev, sb_virt, sb_phys, size); 5957 } 5958 5959 return rc; 5960 } 5961 5962 static void 5963 qlnx_free_rx_buffers(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5964 { 5965 int i; 5966 struct sw_rx_data *rx_buf; 5967 5968 for (i = 0; i < rxq->num_rx_buffers; i++) { 5969 rx_buf = &rxq->sw_rx_ring[i]; 5970 5971 if (rx_buf->data != NULL) { 5972 if (rx_buf->map != NULL) { 5973 
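/* unload and destroy the DMA map before freeing the mbuf */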
bus_dmamap_unload(ha->rx_tag, rx_buf->map); 5974 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 5975 rx_buf->map = NULL; 5976 } 5977 m_freem(rx_buf->data); 5978 rx_buf->data = NULL; 5979 } 5980 } 5981 return; 5982 } 5983 5984 static void 5985 qlnx_free_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 5986 { 5987 struct ecore_dev *cdev; 5988 int i; 5989 5990 cdev = &ha->cdev; 5991 5992 qlnx_free_rx_buffers(ha, rxq); 5993 5994 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 5995 qlnx_free_tpa_mbuf(ha, &rxq->tpa_info[i]); 5996 if (rxq->tpa_info[i].mpf != NULL) 5997 m_freem(rxq->tpa_info[i].mpf); 5998 } 5999 6000 bzero((void *)&rxq->sw_rx_ring[0], 6001 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6002 6003 /* Free the real RQ ring used by FW */ 6004 if (rxq->rx_bd_ring.p_virt_addr) { 6005 ecore_chain_free(cdev, &rxq->rx_bd_ring); 6006 rxq->rx_bd_ring.p_virt_addr = NULL; 6007 } 6008 6009 /* Free the real completion ring used by FW */ 6010 if (rxq->rx_comp_ring.p_virt_addr && 6011 rxq->rx_comp_ring.pbl_sp.p_virt_table) { 6012 ecore_chain_free(cdev, &rxq->rx_comp_ring); 6013 rxq->rx_comp_ring.p_virt_addr = NULL; 6014 rxq->rx_comp_ring.pbl_sp.p_virt_table = NULL; 6015 } 6016 6017 #ifdef QLNX_SOFT_LRO 6018 { 6019 struct lro_ctrl *lro; 6020 6021 lro = &rxq->lro; 6022 tcp_lro_free(lro); 6023 } 6024 #endif /* #ifdef QLNX_SOFT_LRO */ 6025 6026 return; 6027 } 6028 6029 static int 6030 qlnx_alloc_rx_buffer(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6031 { 6032 register struct mbuf *mp; 6033 uint16_t rx_buf_size; 6034 struct sw_rx_data *sw_rx_data; 6035 struct eth_rx_bd *rx_bd; 6036 dma_addr_t dma_addr; 6037 bus_dmamap_t map; 6038 bus_dma_segment_t segs[1]; 6039 int nsegs; 6040 int ret; 6041 6042 rx_buf_size = rxq->rx_buf_size; 6043 6044 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6045 6046 if (mp == NULL) { 6047 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6048 return -ENOMEM; 6049 } 6050 6051 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6052 6053 map = (bus_dmamap_t)0; 6054 6055 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6056 BUS_DMA_NOWAIT); 6057 dma_addr = segs[0].ds_addr; 6058 6059 if (ret || !dma_addr || (nsegs != 1)) { 6060 m_freem(mp); 6061 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6062 ret, (long long unsigned int)dma_addr, nsegs); 6063 return -ENOMEM; 6064 } 6065 6066 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6067 sw_rx_data->data = mp; 6068 sw_rx_data->dma_addr = dma_addr; 6069 sw_rx_data->map = map; 6070 6071 /* Advance PROD and get BD pointer */ 6072 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); 6073 rx_bd->addr.hi = htole32(U64_HI(dma_addr)); 6074 rx_bd->addr.lo = htole32(U64_LO(dma_addr)); 6075 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6076 6077 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6078 6079 return 0; 6080 } 6081 6082 static int 6083 qlnx_alloc_tpa_mbuf(qlnx_host_t *ha, uint16_t rx_buf_size, 6084 struct qlnx_agg_info *tpa) 6085 { 6086 struct mbuf *mp; 6087 dma_addr_t dma_addr; 6088 bus_dmamap_t map; 6089 bus_dma_segment_t segs[1]; 6090 int nsegs; 6091 int ret; 6092 struct sw_rx_data *rx_buf; 6093 6094 mp = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx_buf_size); 6095 6096 if (mp == NULL) { 6097 QL_DPRINT1(ha, "Failed to allocate Rx data\n"); 6098 return -ENOMEM; 6099 } 6100 6101 mp->m_len = mp->m_pkthdr.len = rx_buf_size; 6102 6103 map = (bus_dmamap_t)0; 6104 6105 ret = bus_dmamap_load_mbuf_sg(ha->rx_tag, map, mp, segs, &nsegs, 6106 BUS_DMA_NOWAIT); 6107 dma_addr = 
segs[0].ds_addr; 6108 6109 if (ret || !dma_addr || (nsegs != 1)) { 6110 m_freem(mp); 6111 QL_DPRINT1(ha, "bus_dmamap_load failed[%d, 0x%016llx, %d]\n", 6112 ret, (long long unsigned int)dma_addr, nsegs); 6113 return -ENOMEM; 6114 } 6115 6116 rx_buf = &tpa->rx_buf; 6117 6118 memset(rx_buf, 0, sizeof (struct sw_rx_data)); 6119 6120 rx_buf->data = mp; 6121 rx_buf->dma_addr = dma_addr; 6122 rx_buf->map = map; 6123 6124 bus_dmamap_sync(ha->rx_tag, map, BUS_DMASYNC_PREREAD); 6125 6126 return (0); 6127 } 6128 6129 static void 6130 qlnx_free_tpa_mbuf(qlnx_host_t *ha, struct qlnx_agg_info *tpa) 6131 { 6132 struct sw_rx_data *rx_buf; 6133 6134 rx_buf = &tpa->rx_buf; 6135 6136 if (rx_buf->data != NULL) { 6137 if (rx_buf->map != NULL) { 6138 bus_dmamap_unload(ha->rx_tag, rx_buf->map); 6139 bus_dmamap_destroy(ha->rx_tag, rx_buf->map); 6140 rx_buf->map = NULL; 6141 } 6142 m_freem(rx_buf->data); 6143 rx_buf->data = NULL; 6144 } 6145 return; 6146 } 6147 6148 /* This function allocates all memory needed per Rx queue */ 6149 static int 6150 qlnx_alloc_mem_rxq(qlnx_host_t *ha, struct qlnx_rx_queue *rxq) 6151 { 6152 int i, rc, num_allocated; 6153 struct ecore_dev *cdev; 6154 6155 cdev = &ha->cdev; 6156 6157 rxq->num_rx_buffers = RX_RING_SIZE; 6158 6159 rxq->rx_buf_size = ha->rx_buf_size; 6160 6161 /* Allocate the parallel driver ring for Rx buffers */ 6162 bzero((void *)&rxq->sw_rx_ring[0], 6163 (sizeof (struct sw_rx_data) * RX_RING_SIZE)); 6164 6165 /* Allocate FW Rx ring */ 6166 6167 rc = ecore_chain_alloc(cdev, 6168 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6169 ECORE_CHAIN_MODE_NEXT_PTR, 6170 ECORE_CHAIN_CNT_TYPE_U16, 6171 RX_RING_SIZE, 6172 sizeof(struct eth_rx_bd), 6173 &rxq->rx_bd_ring, NULL); 6174 6175 if (rc) 6176 goto err; 6177 6178 /* Allocate FW completion ring */ 6179 rc = ecore_chain_alloc(cdev, 6180 ECORE_CHAIN_USE_TO_CONSUME, 6181 ECORE_CHAIN_MODE_PBL, 6182 ECORE_CHAIN_CNT_TYPE_U16, 6183 RX_RING_SIZE, 6184 sizeof(union eth_rx_cqe), 6185 &rxq->rx_comp_ring, NULL); 6186 6187 if (rc) 6188 goto err; 6189 6190 /* Allocate buffers for the Rx ring */ 6191 6192 for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) { 6193 rc = qlnx_alloc_tpa_mbuf(ha, rxq->rx_buf_size, 6194 &rxq->tpa_info[i]); 6195 if (rc) 6196 break; 6197 } 6198 6199 for (i = 0; i < rxq->num_rx_buffers; i++) { 6200 rc = qlnx_alloc_rx_buffer(ha, rxq); 6201 if (rc) 6202 break; 6203 } 6204 num_allocated = i; 6205 if (!num_allocated) { 6206 QL_DPRINT1(ha, "Rx buffers allocation failed\n"); 6207 goto err; 6208 } else if (num_allocated < rxq->num_rx_buffers) { 6209 QL_DPRINT1(ha, "Allocated less buffers than" 6210 " desired (%d allocated)\n", num_allocated); 6211 } 6212 6213 #ifdef QLNX_SOFT_LRO 6214 6215 { 6216 struct lro_ctrl *lro; 6217 6218 lro = &rxq->lro; 6219 6220 if (tcp_lro_init_args(lro, ifp, 0, rxq->num_rx_buffers)) { 6221 QL_DPRINT1(ha, "tcp_lro_init[%d] failed\n", 6222 rxq->rxq_id); 6223 goto err; 6224 } 6225 6226 lro->ifp = ha->ifp; 6227 } 6228 #endif /* #ifdef QLNX_SOFT_LRO */ 6229 return 0; 6230 6231 err: 6232 qlnx_free_mem_rxq(ha, rxq); 6233 return -ENOMEM; 6234 } 6235 6236 static void 6237 qlnx_free_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6238 struct qlnx_tx_queue *txq) 6239 { 6240 struct ecore_dev *cdev; 6241 6242 cdev = &ha->cdev; 6243 6244 bzero((void *)&txq->sw_tx_ring[0], 6245 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6246 6247 /* Free the real RQ ring used by FW */ 6248 if (txq->tx_pbl.p_virt_addr) { 6249 ecore_chain_free(cdev, &txq->tx_pbl); 6250 txq->tx_pbl.p_virt_addr = NULL; 6251 } 6252 return; 6253 } 6254 6255 /* This 
function allocates all memory needed per Tx queue */ 6256 static int 6257 qlnx_alloc_mem_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6258 struct qlnx_tx_queue *txq) 6259 { 6260 int ret = ECORE_SUCCESS; 6261 union eth_tx_bd_types *p_virt; 6262 struct ecore_dev *cdev; 6263 6264 cdev = &ha->cdev; 6265 6266 bzero((void *)&txq->sw_tx_ring[0], 6267 (sizeof (struct sw_tx_bd) * TX_RING_SIZE)); 6268 6269 /* Allocate the real Tx ring to be used by FW */ 6270 ret = ecore_chain_alloc(cdev, 6271 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, 6272 ECORE_CHAIN_MODE_PBL, 6273 ECORE_CHAIN_CNT_TYPE_U16, 6274 TX_RING_SIZE, 6275 sizeof(*p_virt), 6276 &txq->tx_pbl, NULL); 6277 6278 if (ret != ECORE_SUCCESS) { 6279 goto err; 6280 } 6281 6282 txq->num_tx_buffers = TX_RING_SIZE; 6283 6284 return 0; 6285 6286 err: 6287 qlnx_free_mem_txq(ha, fp, txq); 6288 return -ENOMEM; 6289 } 6290 6291 static void 6292 qlnx_free_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6293 { 6294 struct mbuf *mp; 6295 if_t ifp = ha->ifp; 6296 6297 if (mtx_initialized(&fp->tx_mtx)) { 6298 if (fp->tx_br != NULL) { 6299 mtx_lock(&fp->tx_mtx); 6300 6301 while ((mp = drbr_dequeue(ifp, fp->tx_br)) != NULL) { 6302 fp->tx_pkts_freed++; 6303 m_freem(mp); 6304 } 6305 6306 mtx_unlock(&fp->tx_mtx); 6307 6308 buf_ring_free(fp->tx_br, M_DEVBUF); 6309 fp->tx_br = NULL; 6310 } 6311 mtx_destroy(&fp->tx_mtx); 6312 } 6313 return; 6314 } 6315 6316 static void 6317 qlnx_free_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6318 { 6319 int tc; 6320 6321 qlnx_free_mem_sb(ha, fp->sb_info); 6322 6323 qlnx_free_mem_rxq(ha, fp->rxq); 6324 6325 for (tc = 0; tc < ha->num_tc; tc++) 6326 qlnx_free_mem_txq(ha, fp, fp->txq[tc]); 6327 6328 return; 6329 } 6330 6331 static int 6332 qlnx_alloc_tx_br(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6333 { 6334 snprintf(fp->tx_mtx_name, sizeof(fp->tx_mtx_name), 6335 "qlnx%d_fp%d_tx_mq_lock", ha->dev_unit, fp->rss_id); 6336 6337 mtx_init(&fp->tx_mtx, fp->tx_mtx_name, NULL, MTX_DEF); 6338 6339 fp->tx_br = buf_ring_alloc(TX_RING_SIZE, M_DEVBUF, 6340 M_NOWAIT, &fp->tx_mtx); 6341 if (fp->tx_br == NULL) { 6342 QL_DPRINT1(ha, "buf_ring_alloc failed for fp[%d, %d]\n", 6343 ha->dev_unit, fp->rss_id); 6344 return -ENOMEM; 6345 } 6346 return 0; 6347 } 6348 6349 static int 6350 qlnx_alloc_mem_fp(qlnx_host_t *ha, struct qlnx_fastpath *fp) 6351 { 6352 int rc, tc; 6353 6354 rc = qlnx_alloc_mem_sb(ha, fp->sb_info, fp->rss_id); 6355 if (rc) 6356 goto err; 6357 6358 if (ha->rx_jumbo_buf_eq_mtu) { 6359 if (ha->max_frame_size <= MCLBYTES) 6360 ha->rx_buf_size = MCLBYTES; 6361 else if (ha->max_frame_size <= MJUMPAGESIZE) 6362 ha->rx_buf_size = MJUMPAGESIZE; 6363 else if (ha->max_frame_size <= MJUM9BYTES) 6364 ha->rx_buf_size = MJUM9BYTES; 6365 else if (ha->max_frame_size <= MJUM16BYTES) 6366 ha->rx_buf_size = MJUM16BYTES; 6367 } else { 6368 if (ha->max_frame_size <= MCLBYTES) 6369 ha->rx_buf_size = MCLBYTES; 6370 else 6371 ha->rx_buf_size = MJUMPAGESIZE; 6372 } 6373 6374 rc = qlnx_alloc_mem_rxq(ha, fp->rxq); 6375 if (rc) 6376 goto err; 6377 6378 for (tc = 0; tc < ha->num_tc; tc++) { 6379 rc = qlnx_alloc_mem_txq(ha, fp, fp->txq[tc]); 6380 if (rc) 6381 goto err; 6382 } 6383 6384 return 0; 6385 6386 err: 6387 qlnx_free_mem_fp(ha, fp); 6388 return -ENOMEM; 6389 } 6390 6391 static void 6392 qlnx_free_mem_load(qlnx_host_t *ha) 6393 { 6394 int i; 6395 6396 for (i = 0; i < ha->num_rss; i++) { 6397 struct qlnx_fastpath *fp = &ha->fp_array[i]; 6398 6399 qlnx_free_mem_fp(ha, fp); 6400 } 6401 return; 6402 } 6403 6404 static int 6405 qlnx_alloc_mem_load(qlnx_host_t *ha) 
6406 { 6407 int rc = 0, rss_id; 6408 6409 for (rss_id = 0; rss_id < ha->num_rss; rss_id++) { 6410 struct qlnx_fastpath *fp = &ha->fp_array[rss_id]; 6411 6412 rc = qlnx_alloc_mem_fp(ha, fp); 6413 if (rc) 6414 break; 6415 } 6416 return (rc); 6417 } 6418 6419 static int 6420 qlnx_start_vport(struct ecore_dev *cdev, 6421 u8 vport_id, 6422 u16 mtu, 6423 u8 drop_ttl0_flg, 6424 u8 inner_vlan_removal_en_flg, 6425 u8 tx_switching, 6426 u8 hw_lro_enable) 6427 { 6428 int rc, i; 6429 struct ecore_sp_vport_start_params vport_start_params = { 0 }; 6430 qlnx_host_t *ha __unused; 6431 6432 ha = (qlnx_host_t *)cdev; 6433 6434 vport_start_params.remove_inner_vlan = inner_vlan_removal_en_flg; 6435 vport_start_params.tx_switching = 0; 6436 vport_start_params.handle_ptp_pkts = 0; 6437 vport_start_params.only_untagged = 0; 6438 vport_start_params.drop_ttl0 = drop_ttl0_flg; 6439 6440 vport_start_params.tpa_mode = 6441 (hw_lro_enable ? ECORE_TPA_MODE_RSC : ECORE_TPA_MODE_NONE); 6442 vport_start_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6443 6444 vport_start_params.vport_id = vport_id; 6445 vport_start_params.mtu = mtu; 6446 6447 QL_DPRINT2(ha, "Setting mtu to %d and VPORT ID = %d\n", mtu, vport_id); 6448 6449 for_each_hwfn(cdev, i) { 6450 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6451 6452 vport_start_params.concrete_fid = p_hwfn->hw_info.concrete_fid; 6453 vport_start_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6454 6455 rc = ecore_sp_vport_start(p_hwfn, &vport_start_params); 6456 6457 if (rc) { 6458 QL_DPRINT1(ha, "Failed to start VPORT V-PORT %d" 6459 " with MTU %d\n" , vport_id, mtu); 6460 return -ENOMEM; 6461 } 6462 6463 ecore_hw_start_fastpath(p_hwfn); 6464 6465 QL_DPRINT2(ha, "Started V-PORT %d with MTU %d\n", 6466 vport_id, mtu); 6467 } 6468 return 0; 6469 } 6470 6471 static int 6472 qlnx_update_vport(struct ecore_dev *cdev, 6473 struct qlnx_update_vport_params *params) 6474 { 6475 struct ecore_sp_vport_update_params sp_params; 6476 int rc, i, j, fp_index; 6477 struct ecore_hwfn *p_hwfn; 6478 struct ecore_rss_params *rss; 6479 qlnx_host_t *ha = (qlnx_host_t *)cdev; 6480 struct qlnx_fastpath *fp; 6481 6482 memset(&sp_params, 0, sizeof(sp_params)); 6483 /* Translate protocol params into sp params */ 6484 sp_params.vport_id = params->vport_id; 6485 6486 sp_params.update_vport_active_rx_flg = 6487 params->update_vport_active_rx_flg; 6488 sp_params.vport_active_rx_flg = params->vport_active_rx_flg; 6489 6490 sp_params.update_vport_active_tx_flg = 6491 params->update_vport_active_tx_flg; 6492 sp_params.vport_active_tx_flg = params->vport_active_tx_flg; 6493 6494 sp_params.update_inner_vlan_removal_flg = 6495 params->update_inner_vlan_removal_flg; 6496 sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; 6497 6498 sp_params.sge_tpa_params = params->sge_tpa_params; 6499 6500 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. 6501 * We need to re-fix the rss values per engine for CMT. 
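 * For CMT (num_hwfns > 1) devices the single table supplied by the caller
 * is re-spread below so that, for engine i, entry j points at the rxq
 * handle of fp_array[((num_hwfns * j) + i) % num_rss]; each engine's copy
 * of the table therefore only references the fastpaths serviced by that
 * engine. For example, with num_hwfns = 2 and num_rss = 4 the 128-entry
 * table becomes 0,2,0,2,... for engine 0 and 1,3,1,3,... for engine 1.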
6502 */ 6503 if (params->rss_params->update_rss_config) 6504 sp_params.rss_params = params->rss_params; 6505 else 6506 sp_params.rss_params = NULL; 6507 6508 for_each_hwfn(cdev, i) { 6509 p_hwfn = &cdev->hwfns[i]; 6510 6511 if ((cdev->num_hwfns > 1) && 6512 params->rss_params->update_rss_config && 6513 params->rss_params->rss_enable) { 6514 rss = params->rss_params; 6515 6516 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE; j++) { 6517 fp_index = ((cdev->num_hwfns * j) + i) % 6518 ha->num_rss; 6519 6520 fp = &ha->fp_array[fp_index]; 6521 rss->rss_ind_table[j] = fp->rxq->handle; 6522 } 6523 6524 for (j = 0; j < ECORE_RSS_IND_TABLE_SIZE;) { 6525 QL_DPRINT3(ha, "%p %p %p %p %p %p %p %p \n", 6526 rss->rss_ind_table[j], 6527 rss->rss_ind_table[j+1], 6528 rss->rss_ind_table[j+2], 6529 rss->rss_ind_table[j+3], 6530 rss->rss_ind_table[j+4], 6531 rss->rss_ind_table[j+5], 6532 rss->rss_ind_table[j+6], 6533 rss->rss_ind_table[j+7]); 6534 j += 8; 6535 } 6536 } 6537 6538 sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; 6539 6540 QL_DPRINT1(ha, "Update sp vport ID=%d\n", params->vport_id); 6541 6542 rc = ecore_sp_vport_update(p_hwfn, &sp_params, 6543 ECORE_SPQ_MODE_EBLOCK, NULL); 6544 if (rc) { 6545 QL_DPRINT1(ha, "Failed to update VPORT\n"); 6546 return rc; 6547 } 6548 6549 QL_DPRINT2(ha, "Updated V-PORT %d: tx_active_flag %d, \ 6550 rx_active_flag %d [tx_update %d], [rx_update %d]\n", 6551 params->vport_id, params->vport_active_tx_flg, 6552 params->vport_active_rx_flg, 6553 params->update_vport_active_tx_flg, 6554 params->update_vport_active_rx_flg); 6555 } 6556 6557 return 0; 6558 } 6559 6560 static void 6561 qlnx_reuse_rx_data(struct qlnx_rx_queue *rxq) 6562 { 6563 struct eth_rx_bd *rx_bd_cons = 6564 ecore_chain_consume(&rxq->rx_bd_ring); 6565 struct eth_rx_bd *rx_bd_prod = 6566 ecore_chain_produce(&rxq->rx_bd_ring); 6567 struct sw_rx_data *sw_rx_data_cons = 6568 &rxq->sw_rx_ring[rxq->sw_rx_cons]; 6569 struct sw_rx_data *sw_rx_data_prod = 6570 &rxq->sw_rx_ring[rxq->sw_rx_prod]; 6571 6572 sw_rx_data_prod->data = sw_rx_data_cons->data; 6573 memcpy(rx_bd_prod, rx_bd_cons, sizeof(struct eth_rx_bd)); 6574 6575 rxq->sw_rx_cons = (rxq->sw_rx_cons + 1) & (RX_RING_SIZE - 1); 6576 rxq->sw_rx_prod = (rxq->sw_rx_prod + 1) & (RX_RING_SIZE - 1); 6577 6578 return; 6579 } 6580 6581 static void 6582 qlnx_update_rx_prod(struct ecore_hwfn *p_hwfn, struct qlnx_rx_queue *rxq) 6583 { 6584 6585 uint16_t bd_prod; 6586 uint16_t cqe_prod; 6587 union { 6588 struct eth_rx_prod_data rx_prod_data; 6589 uint32_t data32; 6590 } rx_prods; 6591 6592 bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); 6593 cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); 6594 6595 /* Update producers */ 6596 rx_prods.rx_prod_data.bd_prod = htole16(bd_prod); 6597 rx_prods.rx_prod_data.cqe_prod = htole16(cqe_prod); 6598 6599 /* Make sure that the BD and SGE data is updated before updating the 6600 * producers since FW might read the BD/SGE right after the producer 6601 * is updated. 6602 */ 6603 wmb(); 6604 6605 #ifdef ECORE_CONFIG_DIRECT_HWFN 6606 internal_ram_wr(p_hwfn, rxq->hw_rxq_prod_addr, 6607 sizeof(rx_prods), &rx_prods.data32); 6608 #else 6609 internal_ram_wr(rxq->hw_rxq_prod_addr, 6610 sizeof(rx_prods), &rx_prods.data32); 6611 #endif 6612 6613 /* mmiowb is needed to synchronize doorbell writes from more than one 6614 * processor. It guarantees that the write arrives to the device before 6615 * the napi lock is released and another qlnx_poll is called (possibly 6616 * on another CPU). 
Without this barrier, the next doorbell can bypass 6617 * this doorbell. This is applicable to IA64/Altix systems. 6618 */ 6619 wmb(); 6620 6621 return; 6622 } 6623 6624 static uint32_t qlnx_hash_key[] = { 6625 ((0x6d << 24)|(0x5a << 16)|(0x56 << 8)|0xda), 6626 ((0x25 << 24)|(0x5b << 16)|(0x0e << 8)|0xc2), 6627 ((0x41 << 24)|(0x67 << 16)|(0x25 << 8)|0x3d), 6628 ((0x43 << 24)|(0xa3 << 16)|(0x8f << 8)|0xb0), 6629 ((0xd0 << 24)|(0xca << 16)|(0x2b << 8)|0xcb), 6630 ((0xae << 24)|(0x7b << 16)|(0x30 << 8)|0xb4), 6631 ((0x77 << 24)|(0xcb << 16)|(0x2d << 8)|0xa3), 6632 ((0x80 << 24)|(0x30 << 16)|(0xf2 << 8)|0x0c), 6633 ((0x6a << 24)|(0x42 << 16)|(0xb7 << 8)|0x3b), 6634 ((0xbe << 24)|(0xac << 16)|(0x01 << 8)|0xfa)}; 6635 6636 static int 6637 qlnx_start_queues(qlnx_host_t *ha) 6638 { 6639 int rc, tc, i, vport_id = 0, 6640 drop_ttl0_flg = 1, vlan_removal_en = 1, 6641 tx_switching = 0, hw_lro_enable = 0; 6642 struct ecore_dev *cdev = &ha->cdev; 6643 struct ecore_rss_params *rss_params = &ha->rss_params; 6644 struct qlnx_update_vport_params vport_update_params; 6645 if_t ifp; 6646 struct ecore_hwfn *p_hwfn; 6647 struct ecore_sge_tpa_params tpa_params; 6648 struct ecore_queue_start_common_params qparams; 6649 struct qlnx_fastpath *fp; 6650 6651 ifp = ha->ifp; 6652 6653 QL_DPRINT1(ha, "Num RSS = %d\n", ha->num_rss); 6654 6655 if (!ha->num_rss) { 6656 QL_DPRINT1(ha, "Cannot update V-VPORT as active as there" 6657 " are no Rx queues\n"); 6658 return -EINVAL; 6659 } 6660 6661 #ifndef QLNX_SOFT_LRO 6662 hw_lro_enable = if_getcapenable(ifp) & IFCAP_LRO; 6663 #endif /* #ifndef QLNX_SOFT_LRO */ 6664 6665 rc = qlnx_start_vport(cdev, vport_id, if_getmtu(ifp), drop_ttl0_flg, 6666 vlan_removal_en, tx_switching, hw_lro_enable); 6667 6668 if (rc) { 6669 QL_DPRINT1(ha, "Start V-PORT failed %d\n", rc); 6670 return rc; 6671 } 6672 6673 QL_DPRINT2(ha, "Start vport ramrod passed, " 6674 "vport_id = %d, MTU = %d, vlan_removal_en = %d\n", 6675 vport_id, (int)(if_getmtu(ifp) + 0xe), vlan_removal_en); 6676 6677 for_each_rss(i) { 6678 struct ecore_rxq_start_ret_params rx_ret_params; 6679 struct ecore_txq_start_ret_params tx_ret_params; 6680 6681 fp = &ha->fp_array[i]; 6682 p_hwfn = &cdev->hwfns[(fp->rss_id % cdev->num_hwfns)]; 6683 6684 bzero(&qparams, sizeof(struct ecore_queue_start_common_params)); 6685 bzero(&rx_ret_params, 6686 sizeof (struct ecore_rxq_start_ret_params)); 6687 6688 qparams.queue_id = i ; 6689 qparams.vport_id = vport_id; 6690 qparams.stats_id = vport_id; 6691 qparams.p_sb = fp->sb_info; 6692 qparams.sb_idx = RX_PI; 6693 6694 6695 rc = ecore_eth_rx_queue_start(p_hwfn, 6696 p_hwfn->hw_info.opaque_fid, 6697 &qparams, 6698 fp->rxq->rx_buf_size, /* bd_max_bytes */ 6699 /* bd_chain_phys_addr */ 6700 fp->rxq->rx_bd_ring.p_phys_addr, 6701 /* cqe_pbl_addr */ 6702 ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring), 6703 /* cqe_pbl_size */ 6704 ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring), 6705 &rx_ret_params); 6706 6707 if (rc) { 6708 QL_DPRINT1(ha, "Start RXQ #%d failed %d\n", i, rc); 6709 return rc; 6710 } 6711 6712 fp->rxq->hw_rxq_prod_addr = rx_ret_params.p_prod; 6713 fp->rxq->handle = rx_ret_params.p_handle; 6714 fp->rxq->hw_cons_ptr = 6715 &fp->sb_info->sb_virt->pi_array[RX_PI]; 6716 6717 qlnx_update_rx_prod(p_hwfn, fp->rxq); 6718 6719 for (tc = 0; tc < ha->num_tc; tc++) { 6720 struct qlnx_tx_queue *txq = fp->txq[tc]; 6721 6722 bzero(&qparams, 6723 sizeof(struct ecore_queue_start_common_params)); 6724 bzero(&tx_ret_params, 6725 sizeof (struct ecore_txq_start_ret_params)); 6726 6727 qparams.queue_id = 
txq->index / cdev->num_hwfns ; 6728 qparams.vport_id = vport_id; 6729 qparams.stats_id = vport_id; 6730 qparams.p_sb = fp->sb_info; 6731 qparams.sb_idx = TX_PI(tc); 6732 6733 rc = ecore_eth_tx_queue_start(p_hwfn, 6734 p_hwfn->hw_info.opaque_fid, 6735 &qparams, tc, 6736 /* bd_chain_phys_addr */ 6737 ecore_chain_get_pbl_phys(&txq->tx_pbl), 6738 ecore_chain_get_page_cnt(&txq->tx_pbl), 6739 &tx_ret_params); 6740 6741 if (rc) { 6742 QL_DPRINT1(ha, "Start TXQ #%d failed %d\n", 6743 txq->index, rc); 6744 return rc; 6745 } 6746 6747 txq->doorbell_addr = tx_ret_params.p_doorbell; 6748 txq->handle = tx_ret_params.p_handle; 6749 6750 txq->hw_cons_ptr = 6751 &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; 6752 SET_FIELD(txq->tx_db.data.params, 6753 ETH_DB_DATA_DEST, DB_DEST_XCM); 6754 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, 6755 DB_AGG_CMD_SET); 6756 SET_FIELD(txq->tx_db.data.params, 6757 ETH_DB_DATA_AGG_VAL_SEL, 6758 DQ_XCM_ETH_TX_BD_PROD_CMD); 6759 6760 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; 6761 } 6762 } 6763 6764 /* Fill struct with RSS params */ 6765 if (ha->num_rss > 1) { 6766 rss_params->update_rss_config = 1; 6767 rss_params->rss_enable = 1; 6768 rss_params->update_rss_capabilities = 1; 6769 rss_params->update_rss_ind_table = 1; 6770 rss_params->update_rss_key = 1; 6771 rss_params->rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | 6772 ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; 6773 rss_params->rss_table_size_log = 7; /* 2^7 = 128 */ 6774 6775 for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { 6776 fp = &ha->fp_array[(i % ha->num_rss)]; 6777 rss_params->rss_ind_table[i] = fp->rxq->handle; 6778 } 6779 6780 for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) 6781 rss_params->rss_key[i] = (__le32)qlnx_hash_key[i]; 6782 6783 } else { 6784 memset(rss_params, 0, sizeof(*rss_params)); 6785 } 6786 6787 /* Prepare and send the vport enable */ 6788 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6789 vport_update_params.vport_id = vport_id; 6790 vport_update_params.update_vport_active_tx_flg = 1; 6791 vport_update_params.vport_active_tx_flg = 1; 6792 vport_update_params.update_vport_active_rx_flg = 1; 6793 vport_update_params.vport_active_rx_flg = 1; 6794 vport_update_params.rss_params = rss_params; 6795 vport_update_params.update_inner_vlan_removal_flg = 1; 6796 vport_update_params.inner_vlan_removal_flg = 1; 6797 6798 if (hw_lro_enable) { 6799 memset(&tpa_params, 0, sizeof (struct ecore_sge_tpa_params)); 6800 6801 tpa_params.max_buffers_per_cqe = QLNX_TPA_MAX_AGG_BUFFERS; 6802 6803 tpa_params.update_tpa_en_flg = 1; 6804 tpa_params.tpa_ipv4_en_flg = 1; 6805 tpa_params.tpa_ipv6_en_flg = 1; 6806 6807 tpa_params.update_tpa_param_flg = 1; 6808 tpa_params.tpa_pkt_split_flg = 0; 6809 tpa_params.tpa_hdr_data_split_flg = 0; 6810 tpa_params.tpa_gro_consistent_flg = 0; 6811 tpa_params.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; 6812 tpa_params.tpa_max_size = (uint16_t)(-1); 6813 tpa_params.tpa_min_size_to_start = if_getmtu(ifp) / 2; 6814 tpa_params.tpa_min_size_to_cont = if_getmtu(ifp) / 2; 6815 6816 vport_update_params.sge_tpa_params = &tpa_params; 6817 } 6818 6819 rc = qlnx_update_vport(cdev, &vport_update_params); 6820 if (rc) { 6821 QL_DPRINT1(ha, "Update V-PORT failed %d\n", rc); 6822 return rc; 6823 } 6824 6825 return 0; 6826 } 6827 6828 static int 6829 qlnx_drain_txq(qlnx_host_t *ha, struct qlnx_fastpath *fp, 6830 struct qlnx_tx_queue *txq) 6831 { 6832 uint16_t hw_bd_cons; 6833 uint16_t ecore_cons_idx; 6834 6835 QL_DPRINT2(ha, "enter\n"); 6836 6837 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 
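	/*
	 * Poll until the Tx PBL chain's consumer index catches up with the
	 * hardware BD consumer read above; between polls, reclaim completed
	 * Tx buffers via qlnx_tx_int() under the fastpath Tx lock and back
	 * off for 2ms before re-reading the hardware consumer.
	 */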
6838 6839 while (hw_bd_cons != 6840 (ecore_cons_idx = ecore_chain_get_cons_idx(&txq->tx_pbl))) { 6841 mtx_lock(&fp->tx_mtx); 6842 6843 (void)qlnx_tx_int(ha, fp, txq); 6844 6845 mtx_unlock(&fp->tx_mtx); 6846 6847 qlnx_mdelay(__func__, 2); 6848 6849 hw_bd_cons = le16toh(*txq->hw_cons_ptr); 6850 } 6851 6852 QL_DPRINT2(ha, "[%d, %d]: done\n", fp->rss_id, txq->index); 6853 6854 return 0; 6855 } 6856 6857 static int 6858 qlnx_stop_queues(qlnx_host_t *ha) 6859 { 6860 struct qlnx_update_vport_params vport_update_params; 6861 struct ecore_dev *cdev; 6862 struct qlnx_fastpath *fp; 6863 int rc, tc, i; 6864 6865 cdev = &ha->cdev; 6866 6867 /* Disable the vport */ 6868 6869 memset(&vport_update_params, 0, sizeof(vport_update_params)); 6870 6871 vport_update_params.vport_id = 0; 6872 vport_update_params.update_vport_active_tx_flg = 1; 6873 vport_update_params.vport_active_tx_flg = 0; 6874 vport_update_params.update_vport_active_rx_flg = 1; 6875 vport_update_params.vport_active_rx_flg = 0; 6876 vport_update_params.rss_params = &ha->rss_params; 6877 vport_update_params.rss_params->update_rss_config = 0; 6878 vport_update_params.rss_params->rss_enable = 0; 6879 vport_update_params.update_inner_vlan_removal_flg = 0; 6880 vport_update_params.inner_vlan_removal_flg = 0; 6881 6882 QL_DPRINT1(ha, "Update vport ID= %d\n", vport_update_params.vport_id); 6883 6884 rc = qlnx_update_vport(cdev, &vport_update_params); 6885 if (rc) { 6886 QL_DPRINT1(ha, "Failed to update vport\n"); 6887 return rc; 6888 } 6889 6890 /* Flush Tx queues. If needed, request drain from MCP */ 6891 for_each_rss(i) { 6892 fp = &ha->fp_array[i]; 6893 6894 for (tc = 0; tc < ha->num_tc; tc++) { 6895 struct qlnx_tx_queue *txq = fp->txq[tc]; 6896 6897 rc = qlnx_drain_txq(ha, fp, txq); 6898 if (rc) 6899 return rc; 6900 } 6901 } 6902 6903 /* Stop all Queues in reverse order*/ 6904 for (i = ha->num_rss - 1; i >= 0; i--) { 6905 struct ecore_hwfn *p_hwfn = &cdev->hwfns[(i % cdev->num_hwfns)]; 6906 6907 fp = &ha->fp_array[i]; 6908 6909 /* Stop the Tx Queue(s)*/ 6910 for (tc = 0; tc < ha->num_tc; tc++) { 6911 int tx_queue_id __unused; 6912 6913 tx_queue_id = tc * ha->num_rss + i; 6914 rc = ecore_eth_tx_queue_stop(p_hwfn, 6915 fp->txq[tc]->handle); 6916 6917 if (rc) { 6918 QL_DPRINT1(ha, "Failed to stop TXQ #%d\n", 6919 tx_queue_id); 6920 return rc; 6921 } 6922 } 6923 6924 /* Stop the Rx Queue*/ 6925 rc = ecore_eth_rx_queue_stop(p_hwfn, fp->rxq->handle, false, 6926 false); 6927 if (rc) { 6928 QL_DPRINT1(ha, "Failed to stop RXQ #%d\n", i); 6929 return rc; 6930 } 6931 } 6932 6933 /* Stop the vport */ 6934 for_each_hwfn(cdev, i) { 6935 struct ecore_hwfn *p_hwfn = &cdev->hwfns[i]; 6936 6937 rc = ecore_sp_vport_stop(p_hwfn, p_hwfn->hw_info.opaque_fid, 0); 6938 6939 if (rc) { 6940 QL_DPRINT1(ha, "Failed to stop VPORT\n"); 6941 return rc; 6942 } 6943 } 6944 6945 return rc; 6946 } 6947 6948 static int 6949 qlnx_set_ucast_rx_mac(qlnx_host_t *ha, 6950 enum ecore_filter_opcode opcode, 6951 unsigned char mac[ETH_ALEN]) 6952 { 6953 struct ecore_filter_ucast ucast; 6954 struct ecore_dev *cdev; 6955 int rc; 6956 6957 cdev = &ha->cdev; 6958 6959 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6960 6961 ucast.opcode = opcode; 6962 ucast.type = ECORE_FILTER_MAC; 6963 ucast.is_rx_filter = 1; 6964 ucast.vport_to_add_to = 0; 6965 memcpy(&ucast.mac[0], mac, ETH_ALEN); 6966 6967 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6968 6969 return (rc); 6970 } 6971 6972 static int 6973 qlnx_remove_all_ucast_mac(qlnx_host_t *ha) 6974 { 6975 struct 
ecore_filter_ucast ucast; 6976 struct ecore_dev *cdev; 6977 int rc; 6978 6979 bzero(&ucast, sizeof(struct ecore_filter_ucast)); 6980 6981 ucast.opcode = ECORE_FILTER_REPLACE; 6982 ucast.type = ECORE_FILTER_MAC; 6983 ucast.is_rx_filter = 1; 6984 6985 cdev = &ha->cdev; 6986 6987 rc = ecore_filter_ucast_cmd(cdev, &ucast, ECORE_SPQ_MODE_CB, NULL); 6988 6989 return (rc); 6990 } 6991 6992 static int 6993 qlnx_remove_all_mcast_mac(qlnx_host_t *ha) 6994 { 6995 struct ecore_filter_mcast *mcast; 6996 struct ecore_dev *cdev; 6997 int rc, i; 6998 6999 cdev = &ha->cdev; 7000 7001 mcast = &ha->ecore_mcast; 7002 bzero(mcast, sizeof(struct ecore_filter_mcast)); 7003 7004 mcast->opcode = ECORE_FILTER_REMOVE; 7005 7006 for (i = 0; i < QLNX_MAX_NUM_MULTICAST_ADDRS; i++) { 7007 if (ha->mcast[i].addr[0] || ha->mcast[i].addr[1] || 7008 ha->mcast[i].addr[2] || ha->mcast[i].addr[3] || 7009 ha->mcast[i].addr[4] || ha->mcast[i].addr[5]) { 7010 memcpy(&mcast->mac[i][0], &ha->mcast[i].addr[0], ETH_ALEN); 7011 mcast->num_mc_addrs++; 7012 } 7013 } 7014 mcast = &ha->ecore_mcast; 7015 7016 rc = ecore_filter_mcast_cmd(cdev, mcast, ECORE_SPQ_MODE_CB, NULL); 7017 7018 bzero(ha->mcast, (sizeof(qlnx_mcast_t) * QLNX_MAX_NUM_MULTICAST_ADDRS)); 7019 ha->nmcast = 0; 7020 7021 return (rc); 7022 } 7023 7024 static int 7025 qlnx_clean_filters(qlnx_host_t *ha) 7026 { 7027 int rc = 0; 7028 7029 /* Remove all unicast macs */ 7030 rc = qlnx_remove_all_ucast_mac(ha); 7031 if (rc) 7032 return rc; 7033 7034 /* Remove all multicast macs */ 7035 rc = qlnx_remove_all_mcast_mac(ha); 7036 if (rc) 7037 return rc; 7038 7039 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_FLUSH, ha->primary_mac); 7040 7041 return (rc); 7042 } 7043 7044 static int 7045 qlnx_set_rx_accept_filter(qlnx_host_t *ha, uint8_t filter) 7046 { 7047 struct ecore_filter_accept_flags accept; 7048 int rc = 0; 7049 struct ecore_dev *cdev; 7050 7051 cdev = &ha->cdev; 7052 7053 bzero(&accept, sizeof(struct ecore_filter_accept_flags)); 7054 7055 accept.update_rx_mode_config = 1; 7056 accept.rx_accept_filter = filter; 7057 7058 accept.update_tx_mode_config = 1; 7059 accept.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 7060 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST; 7061 7062 rc = ecore_filter_accept_cmd(cdev, 0, accept, false, false, 7063 ECORE_SPQ_MODE_CB, NULL); 7064 7065 return (rc); 7066 } 7067 7068 static int 7069 qlnx_set_rx_mode(qlnx_host_t *ha) 7070 { 7071 int rc = 0; 7072 uint8_t filter; 7073 7074 rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac); 7075 if (rc) 7076 return rc; 7077 7078 rc = qlnx_remove_all_mcast_mac(ha); 7079 if (rc) 7080 return rc; 7081 7082 filter = ECORE_ACCEPT_UCAST_MATCHED | 7083 ECORE_ACCEPT_MCAST_MATCHED | 7084 ECORE_ACCEPT_BCAST; 7085 7086 if (qlnx_vf_device(ha) == 0) { 7087 filter |= ECORE_ACCEPT_UCAST_UNMATCHED; 7088 filter |= ECORE_ACCEPT_MCAST_UNMATCHED; 7089 } 7090 ha->filter = filter; 7091 7092 rc = qlnx_set_rx_accept_filter(ha, filter); 7093 7094 return (rc); 7095 } 7096 7097 static int 7098 qlnx_set_link(qlnx_host_t *ha, bool link_up) 7099 { 7100 int i, rc = 0; 7101 struct ecore_dev *cdev; 7102 struct ecore_hwfn *hwfn; 7103 struct ecore_ptt *ptt; 7104 7105 if (qlnx_vf_device(ha) == 0) 7106 return (0); 7107 7108 cdev = &ha->cdev; 7109 7110 for_each_hwfn(cdev, i) { 7111 hwfn = &cdev->hwfns[i]; 7112 7113 ptt = ecore_ptt_acquire(hwfn); 7114 if (!ptt) 7115 return -EBUSY; 7116 7117 rc = ecore_mcp_set_link(hwfn, ptt, link_up); 7118 7119 ecore_ptt_release(hwfn, ptt); 7120 7121 if (rc) 7122 return rc; 7123 } 7124 return (rc); 
7125 } 7126 7127 static uint64_t 7128 qlnx_get_counter(if_t ifp, ift_counter cnt) 7129 { 7130 qlnx_host_t *ha; 7131 uint64_t count; 7132 7133 ha = (qlnx_host_t *)if_getsoftc(ifp); 7134 7135 switch (cnt) { 7136 case IFCOUNTER_IPACKETS: 7137 count = ha->hw_stats.common.rx_ucast_pkts + 7138 ha->hw_stats.common.rx_mcast_pkts + 7139 ha->hw_stats.common.rx_bcast_pkts; 7140 break; 7141 7142 case IFCOUNTER_IERRORS: 7143 count = ha->hw_stats.common.rx_crc_errors + 7144 ha->hw_stats.common.rx_align_errors + 7145 ha->hw_stats.common.rx_oversize_packets + 7146 ha->hw_stats.common.rx_undersize_packets; 7147 break; 7148 7149 case IFCOUNTER_OPACKETS: 7150 count = ha->hw_stats.common.tx_ucast_pkts + 7151 ha->hw_stats.common.tx_mcast_pkts + 7152 ha->hw_stats.common.tx_bcast_pkts; 7153 break; 7154 7155 case IFCOUNTER_OERRORS: 7156 count = ha->hw_stats.common.tx_err_drop_pkts; 7157 break; 7158 7159 case IFCOUNTER_COLLISIONS: 7160 return (0); 7161 7162 case IFCOUNTER_IBYTES: 7163 count = ha->hw_stats.common.rx_ucast_bytes + 7164 ha->hw_stats.common.rx_mcast_bytes + 7165 ha->hw_stats.common.rx_bcast_bytes; 7166 break; 7167 7168 case IFCOUNTER_OBYTES: 7169 count = ha->hw_stats.common.tx_ucast_bytes + 7170 ha->hw_stats.common.tx_mcast_bytes + 7171 ha->hw_stats.common.tx_bcast_bytes; 7172 break; 7173 7174 case IFCOUNTER_IMCASTS: 7175 count = ha->hw_stats.common.rx_mcast_bytes; 7176 break; 7177 7178 case IFCOUNTER_OMCASTS: 7179 count = ha->hw_stats.common.tx_mcast_bytes; 7180 break; 7181 7182 case IFCOUNTER_IQDROPS: 7183 case IFCOUNTER_OQDROPS: 7184 case IFCOUNTER_NOPROTO: 7185 7186 default: 7187 return (if_get_counter_default(ifp, cnt)); 7188 } 7189 return (count); 7190 } 7191 7192 static void 7193 qlnx_timer(void *arg) 7194 { 7195 qlnx_host_t *ha; 7196 7197 ha = (qlnx_host_t *)arg; 7198 7199 if (ha->error_recovery) { 7200 ha->error_recovery = 0; 7201 taskqueue_enqueue(ha->err_taskqueue, &ha->err_task); 7202 return; 7203 } 7204 7205 ecore_get_vport_stats(&ha->cdev, &ha->hw_stats); 7206 7207 if (ha->storm_stats_gather) 7208 qlnx_sample_storm_stats(ha); 7209 7210 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7211 7212 return; 7213 } 7214 7215 static int 7216 qlnx_load(qlnx_host_t *ha) 7217 { 7218 int i; 7219 int rc = 0; 7220 device_t dev; 7221 7222 dev = ha->pci_dev; 7223 7224 QL_DPRINT2(ha, "enter\n"); 7225 7226 rc = qlnx_alloc_mem_arrays(ha); 7227 if (rc) 7228 goto qlnx_load_exit0; 7229 7230 qlnx_init_fp(ha); 7231 7232 rc = qlnx_alloc_mem_load(ha); 7233 if (rc) 7234 goto qlnx_load_exit1; 7235 7236 QL_DPRINT2(ha, "Allocated %d RSS queues on %d TC/s\n", 7237 ha->num_rss, ha->num_tc); 7238 7239 for (i = 0; i < ha->num_rss; i++) { 7240 if ((rc = bus_setup_intr(dev, ha->irq_vec[i].irq, 7241 (INTR_TYPE_NET | INTR_MPSAFE), 7242 NULL, qlnx_fp_isr, &ha->irq_vec[i], 7243 &ha->irq_vec[i].handle))) { 7244 QL_DPRINT1(ha, "could not setup interrupt\n"); 7245 goto qlnx_load_exit2; 7246 } 7247 7248 QL_DPRINT2(ha, "rss_id = %d irq_rid %d \ 7249 irq %p handle %p\n", i, 7250 ha->irq_vec[i].irq_rid, 7251 ha->irq_vec[i].irq, ha->irq_vec[i].handle); 7252 7253 bus_bind_intr(dev, ha->irq_vec[i].irq, (i % mp_ncpus)); 7254 } 7255 7256 rc = qlnx_start_queues(ha); 7257 if (rc) 7258 goto qlnx_load_exit2; 7259 7260 QL_DPRINT2(ha, "Start VPORT, RXQ and TXQ succeeded\n"); 7261 7262 /* Add primary mac and set Rx filters */ 7263 rc = qlnx_set_rx_mode(ha); 7264 if (rc) 7265 goto qlnx_load_exit2; 7266 7267 /* Ask for link-up using current configuration */ 7268 qlnx_set_link(ha, true); 7269 7270 if (qlnx_vf_device(ha) == 0) 7271 
qlnx_link_update(&ha->cdev.hwfns[0]); 7272 7273 ha->state = QLNX_STATE_OPEN; 7274 7275 bzero(&ha->hw_stats, sizeof(struct ecore_eth_stats)); 7276 7277 if (ha->flags.callout_init) 7278 callout_reset(&ha->qlnx_callout, hz, qlnx_timer, ha); 7279 7280 goto qlnx_load_exit0; 7281 7282 qlnx_load_exit2: 7283 qlnx_free_mem_load(ha); 7284 7285 qlnx_load_exit1: 7286 ha->num_rss = 0; 7287 7288 qlnx_load_exit0: 7289 QL_DPRINT2(ha, "exit [%d]\n", rc); 7290 return rc; 7291 } 7292 7293 static void 7294 qlnx_drain_soft_lro(qlnx_host_t *ha) 7295 { 7296 #ifdef QLNX_SOFT_LRO 7297 7298 if_t ifp; 7299 int i; 7300 7301 ifp = ha->ifp; 7302 7303 if (if_getcapenable(ifp) & IFCAP_LRO) { 7304 for (i = 0; i < ha->num_rss; i++) { 7305 struct qlnx_fastpath *fp = &ha->fp_array[i]; 7306 struct lro_ctrl *lro; 7307 7308 lro = &fp->rxq->lro; 7309 7310 tcp_lro_flush_all(lro); 7311 } 7312 } 7313 7314 #endif /* #ifdef QLNX_SOFT_LRO */ 7315 7316 return; 7317 } 7318 7319 static void 7320 qlnx_unload(qlnx_host_t *ha) 7321 { 7322 struct ecore_dev *cdev; 7323 device_t dev; 7324 int i; 7325 7326 cdev = &ha->cdev; 7327 dev = ha->pci_dev; 7328 7329 QL_DPRINT2(ha, "enter\n"); 7330 QL_DPRINT1(ha, " QLNX STATE = %d\n",ha->state); 7331 7332 if (ha->state == QLNX_STATE_OPEN) { 7333 qlnx_set_link(ha, false); 7334 qlnx_clean_filters(ha); 7335 qlnx_stop_queues(ha); 7336 ecore_hw_stop_fastpath(cdev); 7337 7338 for (i = 0; i < ha->num_rss; i++) { 7339 if (ha->irq_vec[i].handle) { 7340 (void)bus_teardown_intr(dev, 7341 ha->irq_vec[i].irq, 7342 ha->irq_vec[i].handle); 7343 ha->irq_vec[i].handle = NULL; 7344 } 7345 } 7346 7347 qlnx_drain_fp_taskqueues(ha); 7348 qlnx_drain_soft_lro(ha); 7349 qlnx_free_mem_load(ha); 7350 } 7351 7352 if (ha->flags.callout_init) 7353 callout_drain(&ha->qlnx_callout); 7354 7355 qlnx_mdelay(__func__, 1000); 7356 7357 ha->state = QLNX_STATE_CLOSED; 7358 7359 QL_DPRINT2(ha, "exit\n"); 7360 return; 7361 } 7362 7363 static int 7364 qlnx_grc_dumpsize(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7365 { 7366 int rval = -1; 7367 struct ecore_hwfn *p_hwfn; 7368 struct ecore_ptt *p_ptt; 7369 7370 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7371 7372 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7373 p_ptt = ecore_ptt_acquire(p_hwfn); 7374 7375 if (!p_ptt) { 7376 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7377 return (rval); 7378 } 7379 7380 rval = ecore_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7381 7382 if (rval == DBG_STATUS_OK) 7383 rval = 0; 7384 else { 7385 QL_DPRINT1(ha, "ecore_dbg_grc_get_dump_buf_size failed" 7386 "[0x%x]\n", rval); 7387 } 7388 7389 ecore_ptt_release(p_hwfn, p_ptt); 7390 7391 return (rval); 7392 } 7393 7394 static int 7395 qlnx_idle_chk_size(qlnx_host_t *ha, uint32_t *num_dwords, int hwfn_index) 7396 { 7397 int rval = -1; 7398 struct ecore_hwfn *p_hwfn; 7399 struct ecore_ptt *p_ptt; 7400 7401 ecore_dbg_set_app_ver(ecore_dbg_get_fw_func_ver()); 7402 7403 p_hwfn = &ha->cdev.hwfns[hwfn_index]; 7404 p_ptt = ecore_ptt_acquire(p_hwfn); 7405 7406 if (!p_ptt) { 7407 QL_DPRINT1(ha, "ecore_ptt_acquire failed\n"); 7408 return (rval); 7409 } 7410 7411 rval = ecore_dbg_idle_chk_get_dump_buf_size(p_hwfn, p_ptt, num_dwords); 7412 7413 if (rval == DBG_STATUS_OK) 7414 rval = 0; 7415 else { 7416 QL_DPRINT1(ha, "ecore_dbg_idle_chk_get_dump_buf_size failed" 7417 " [0x%x]\n", rval); 7418 } 7419 7420 ecore_ptt_release(p_hwfn, p_ptt); 7421 7422 return (rval); 7423 } 7424 7425 static void 7426 qlnx_sample_storm_stats(qlnx_host_t *ha) 7427 { 7428 int i, index; 7429 struct ecore_dev *cdev; 7430 
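	/*
	 * Each storm processor (X/Y/P/T/M/U) exposes active, stall, sleeping
	 * and inactive cycle counters at fixed offsets within its SEM fast
	 * memory window. One sample per hwfn is read below and stored at
	 * storm_stats[storm_stats_index + (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN)]
	 * for hwfn i.
	 */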
qlnx_storm_stats_t *s_stats; 7431 uint32_t reg; 7432 struct ecore_ptt *p_ptt; 7433 struct ecore_hwfn *hwfn; 7434 7435 if (ha->storm_stats_index >= QLNX_STORM_STATS_SAMPLES_PER_HWFN) { 7436 ha->storm_stats_gather = 0; 7437 return; 7438 } 7439 7440 cdev = &ha->cdev; 7441 7442 for_each_hwfn(cdev, i) { 7443 hwfn = &cdev->hwfns[i]; 7444 7445 p_ptt = ecore_ptt_acquire(hwfn); 7446 if (!p_ptt) 7447 return; 7448 7449 index = ha->storm_stats_index + 7450 (i * QLNX_STORM_STATS_SAMPLES_PER_HWFN); 7451 7452 s_stats = &ha->storm_stats[index]; 7453 7454 /* XSTORM */ 7455 reg = XSEM_REG_FAST_MEMORY + 7456 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7457 s_stats->xstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7458 7459 reg = XSEM_REG_FAST_MEMORY + 7460 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7461 s_stats->xstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7462 7463 reg = XSEM_REG_FAST_MEMORY + 7464 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7465 s_stats->xstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7466 7467 reg = XSEM_REG_FAST_MEMORY + 7468 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7469 s_stats->xstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7470 7471 /* YSTORM */ 7472 reg = YSEM_REG_FAST_MEMORY + 7473 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7474 s_stats->ystorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7475 7476 reg = YSEM_REG_FAST_MEMORY + 7477 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7478 s_stats->ystorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7479 7480 reg = YSEM_REG_FAST_MEMORY + 7481 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7482 s_stats->ystorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7483 7484 reg = YSEM_REG_FAST_MEMORY + 7485 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7486 s_stats->ystorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7487 7488 /* PSTORM */ 7489 reg = PSEM_REG_FAST_MEMORY + 7490 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7491 s_stats->pstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7492 7493 reg = PSEM_REG_FAST_MEMORY + 7494 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7495 s_stats->pstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7496 7497 reg = PSEM_REG_FAST_MEMORY + 7498 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7499 s_stats->pstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7500 7501 reg = PSEM_REG_FAST_MEMORY + 7502 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7503 s_stats->pstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7504 7505 /* TSTORM */ 7506 reg = TSEM_REG_FAST_MEMORY + 7507 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7508 s_stats->tstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7509 7510 reg = TSEM_REG_FAST_MEMORY + 7511 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7512 s_stats->tstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7513 7514 reg = TSEM_REG_FAST_MEMORY + 7515 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7516 s_stats->tstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7517 7518 reg = TSEM_REG_FAST_MEMORY + 7519 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7520 s_stats->tstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7521 7522 /* MSTORM */ 7523 reg = MSEM_REG_FAST_MEMORY + 7524 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7525 s_stats->mstorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7526 7527 reg = MSEM_REG_FAST_MEMORY + 7528 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7529 s_stats->mstorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7530 7531 reg = MSEM_REG_FAST_MEMORY + 7532 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7533 s_stats->mstorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7534 7535 reg = MSEM_REG_FAST_MEMORY + 7536 
SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7537 s_stats->mstorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7538 7539 /* USTORM */ 7540 reg = USEM_REG_FAST_MEMORY + 7541 SEM_FAST_REG_STORM_ACTIVE_CYCLES_BB_K2; 7542 s_stats->ustorm_active_cycles = ecore_rd(hwfn, p_ptt, reg); 7543 7544 reg = USEM_REG_FAST_MEMORY + 7545 SEM_FAST_REG_STORM_STALL_CYCLES_BB_K2; 7546 s_stats->ustorm_stall_cycles = ecore_rd(hwfn, p_ptt, reg); 7547 7548 reg = USEM_REG_FAST_MEMORY + 7549 SEM_FAST_REG_IDLE_SLEEPING_CYCLES_BB_K2; 7550 s_stats->ustorm_sleeping_cycles = ecore_rd(hwfn, p_ptt, reg); 7551 7552 reg = USEM_REG_FAST_MEMORY + 7553 SEM_FAST_REG_IDLE_INACTIVE_CYCLES_BB_K2; 7554 s_stats->ustorm_inactive_cycles = ecore_rd(hwfn, p_ptt, reg); 7555 7556 ecore_ptt_release(hwfn, p_ptt); 7557 } 7558 7559 ha->storm_stats_index++; 7560 7561 return; 7562 } 7563 7564 /* 7565 * Name: qlnx_dump_buf8 7566 * Function: dumps a buffer as bytes 7567 */ 7568 static void 7569 qlnx_dump_buf8(qlnx_host_t *ha, const char *msg, void *dbuf, uint32_t len) 7570 { 7571 device_t dev; 7572 uint32_t i = 0; 7573 uint8_t *buf; 7574 7575 dev = ha->pci_dev; 7576 buf = dbuf; 7577 7578 device_printf(dev, "%s: %s 0x%x dump start\n", __func__, msg, len); 7579 7580 while (len >= 16) { 7581 device_printf(dev,"0x%08x:" 7582 " %02x %02x %02x %02x %02x %02x %02x %02x" 7583 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7584 buf[0], buf[1], buf[2], buf[3], 7585 buf[4], buf[5], buf[6], buf[7], 7586 buf[8], buf[9], buf[10], buf[11], 7587 buf[12], buf[13], buf[14], buf[15]); 7588 i += 16; 7589 len -= 16; 7590 buf += 16; 7591 } 7592 switch (len) { 7593 case 1: 7594 device_printf(dev,"0x%08x: %02x\n", i, buf[0]); 7595 break; 7596 case 2: 7597 device_printf(dev,"0x%08x: %02x %02x\n", i, buf[0], buf[1]); 7598 break; 7599 case 3: 7600 device_printf(dev,"0x%08x: %02x %02x %02x\n", 7601 i, buf[0], buf[1], buf[2]); 7602 break; 7603 case 4: 7604 device_printf(dev,"0x%08x: %02x %02x %02x %02x\n", i, 7605 buf[0], buf[1], buf[2], buf[3]); 7606 break; 7607 case 5: 7608 device_printf(dev,"0x%08x:" 7609 " %02x %02x %02x %02x %02x\n", i, 7610 buf[0], buf[1], buf[2], buf[3], buf[4]); 7611 break; 7612 case 6: 7613 device_printf(dev,"0x%08x:" 7614 " %02x %02x %02x %02x %02x %02x\n", i, 7615 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5]); 7616 break; 7617 case 7: 7618 device_printf(dev,"0x%08x:" 7619 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7620 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); 7621 break; 7622 case 8: 7623 device_printf(dev,"0x%08x:" 7624 " %02x %02x %02x %02x %02x %02x %02x %02x\n", i, 7625 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7626 buf[7]); 7627 break; 7628 case 9: 7629 device_printf(dev,"0x%08x:" 7630 " %02x %02x %02x %02x %02x %02x %02x %02x" 7631 " %02x\n", i, 7632 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7633 buf[7], buf[8]); 7634 break; 7635 case 10: 7636 device_printf(dev,"0x%08x:" 7637 " %02x %02x %02x %02x %02x %02x %02x %02x" 7638 " %02x %02x\n", i, 7639 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7640 buf[7], buf[8], buf[9]); 7641 break; 7642 case 11: 7643 device_printf(dev,"0x%08x:" 7644 " %02x %02x %02x %02x %02x %02x %02x %02x" 7645 " %02x %02x %02x\n", i, 7646 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7647 buf[7], buf[8], buf[9], buf[10]); 7648 break; 7649 case 12: 7650 device_printf(dev,"0x%08x:" 7651 " %02x %02x %02x %02x %02x %02x %02x %02x" 7652 " %02x %02x %02x %02x\n", i, 7653 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7654 buf[7], buf[8], buf[9], buf[10], 
buf[11]); 7655 break; 7656 case 13: 7657 device_printf(dev,"0x%08x:" 7658 " %02x %02x %02x %02x %02x %02x %02x %02x" 7659 " %02x %02x %02x %02x %02x\n", i, 7660 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7661 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12]); 7662 break; 7663 case 14: 7664 device_printf(dev,"0x%08x:" 7665 " %02x %02x %02x %02x %02x %02x %02x %02x" 7666 " %02x %02x %02x %02x %02x %02x\n", i, 7667 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7668 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7669 buf[13]); 7670 break; 7671 case 15: 7672 device_printf(dev,"0x%08x:" 7673 " %02x %02x %02x %02x %02x %02x %02x %02x" 7674 " %02x %02x %02x %02x %02x %02x %02x\n", i, 7675 buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], 7676 buf[7], buf[8], buf[9], buf[10], buf[11], buf[12], 7677 buf[13], buf[14]); 7678 break; 7679 default: 7680 break; 7681 } 7682 7683 device_printf(dev, "%s: %s dump end\n", __func__, msg); 7684 7685 return; 7686 } 7687 7688 #ifdef CONFIG_ECORE_SRIOV 7689 7690 static void 7691 __qlnx_osal_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, uint8_t rel_vf_id) 7692 { 7693 struct ecore_public_vf_info *vf_info; 7694 7695 vf_info = ecore_iov_get_public_vf_info(p_hwfn, rel_vf_id, false); 7696 7697 if (!vf_info) 7698 return; 7699 7700 /* Clear the VF mac */ 7701 memset(vf_info->forced_mac, 0, ETH_ALEN); 7702 7703 vf_info->forced_vlan = 0; 7704 7705 return; 7706 } 7707 7708 void 7709 qlnx_osal_iov_vf_cleanup(void *p_hwfn, uint8_t relative_vf_id) 7710 { 7711 __qlnx_osal_iov_vf_cleanup(p_hwfn, relative_vf_id); 7712 return; 7713 } 7714 7715 static int 7716 __qlnx_iov_chk_ucast(struct ecore_hwfn *p_hwfn, int vfid, 7717 struct ecore_filter_ucast *params) 7718 { 7719 struct ecore_public_vf_info *vf; 7720 7721 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { 7722 QL_DPRINT1(((qlnx_host_t *)p_hwfn->p_dev), 7723 "VF[%d] vport not initialized\n", vfid); 7724 return ECORE_INVAL; 7725 } 7726 7727 vf = ecore_iov_get_public_vf_info(p_hwfn, vfid, true); 7728 if (!vf) 7729 return -EINVAL; 7730 7731 /* No real decision to make; Store the configured MAC */ 7732 if (params->type == ECORE_FILTER_MAC || 7733 params->type == ECORE_FILTER_MAC_VLAN) 7734 memcpy(params->mac, vf->forced_mac, ETH_ALEN); 7735 7736 return 0; 7737 } 7738 7739 int 7740 qlnx_iov_chk_ucast(void *p_hwfn, int vfid, void *params) 7741 { 7742 return (__qlnx_iov_chk_ucast(p_hwfn, vfid, params)); 7743 } 7744 7745 static int 7746 __qlnx_iov_update_vport(struct ecore_hwfn *hwfn, uint8_t vfid, 7747 struct ecore_sp_vport_update_params *params, uint16_t * tlvs) 7748 { 7749 if (!ecore_iov_vf_has_vport_instance(hwfn, vfid)) { 7750 QL_DPRINT1(((qlnx_host_t *)hwfn->p_dev), 7751 "VF[%d] vport not initialized\n", vfid); 7752 return ECORE_INVAL; 7753 } 7754 7755 /* Untrusted VFs can't even be trusted to know that fact. 7756 * Simply indicate everything is configured fine, and trace 7757 * configuration 'behind their back'. 
7758 */ 7759 if (!(*tlvs & BIT(ECORE_IOV_VP_UPDATE_ACCEPT_PARAM))) 7760 return 0; 7761 7762 return 0; 7763 7764 } 7765 int 7766 qlnx_iov_update_vport(void *hwfn, uint8_t vfid, void *params, uint16_t *tlvs) 7767 { 7768 return(__qlnx_iov_update_vport(hwfn, vfid, params, tlvs)); 7769 } 7770 7771 static int 7772 qlnx_find_hwfn_index(struct ecore_hwfn *p_hwfn) 7773 { 7774 int i; 7775 struct ecore_dev *cdev; 7776 7777 cdev = p_hwfn->p_dev; 7778 7779 for (i = 0; i < cdev->num_hwfns; i++) { 7780 if (&cdev->hwfns[i] == p_hwfn) 7781 break; 7782 } 7783 7784 if (i >= cdev->num_hwfns) 7785 return (-1); 7786 7787 return (i); 7788 } 7789 7790 static int 7791 __qlnx_pf_vf_msg(struct ecore_hwfn *p_hwfn, uint16_t rel_vf_id) 7792 { 7793 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7794 int i; 7795 7796 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p rel_vf_id = %d\n", 7797 ha, p_hwfn->p_dev, p_hwfn, rel_vf_id); 7798 7799 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7800 return (-1); 7801 7802 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7803 atomic_testandset_32(&ha->sriov_task[i].flags, 7804 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG); 7805 7806 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7807 &ha->sriov_task[i].pf_task); 7808 } 7809 7810 return (ECORE_SUCCESS); 7811 } 7812 7813 int 7814 qlnx_pf_vf_msg(void *p_hwfn, uint16_t relative_vf_id) 7815 { 7816 return (__qlnx_pf_vf_msg(p_hwfn, relative_vf_id)); 7817 } 7818 7819 static void 7820 __qlnx_vf_flr_update(struct ecore_hwfn *p_hwfn) 7821 { 7822 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7823 int i; 7824 7825 if (!ha->sriov_initialized) 7826 return; 7827 7828 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7829 ha, p_hwfn->p_dev, p_hwfn); 7830 7831 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7832 return; 7833 7834 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7835 atomic_testandset_32(&ha->sriov_task[i].flags, 7836 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE); 7837 7838 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7839 &ha->sriov_task[i].pf_task); 7840 } 7841 7842 return; 7843 } 7844 7845 void 7846 qlnx_vf_flr_update(void *p_hwfn) 7847 { 7848 __qlnx_vf_flr_update(p_hwfn); 7849 7850 return; 7851 } 7852 7853 #ifndef QLNX_VF 7854 7855 static void 7856 qlnx_vf_bulleting_update(struct ecore_hwfn *p_hwfn) 7857 { 7858 qlnx_host_t *ha = (qlnx_host_t *)p_hwfn->p_dev; 7859 int i; 7860 7861 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p \n", 7862 ha, p_hwfn->p_dev, p_hwfn); 7863 7864 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 7865 return; 7866 7867 QL_DPRINT2(ha, "ha = %p cdev = %p p_hwfn = %p i = %d\n", 7868 ha, p_hwfn->p_dev, p_hwfn, i); 7869 7870 if (ha->sriov_task[i].pf_taskqueue != NULL) { 7871 atomic_testandset_32(&ha->sriov_task[i].flags, 7872 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE); 7873 7874 taskqueue_enqueue(ha->sriov_task[i].pf_taskqueue, 7875 &ha->sriov_task[i].pf_task); 7876 } 7877 } 7878 7879 static void 7880 qlnx_initialize_sriov(qlnx_host_t *ha) 7881 { 7882 device_t dev; 7883 nvlist_t *pf_schema, *vf_schema; 7884 int iov_error; 7885 7886 dev = ha->pci_dev; 7887 7888 pf_schema = pci_iov_schema_alloc_node(); 7889 vf_schema = pci_iov_schema_alloc_node(); 7890 7891 pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL); 7892 pci_iov_schema_add_bool(vf_schema, "allow-set-mac", 7893 IOV_SCHEMA_HASDEFAULT, FALSE); 7894 pci_iov_schema_add_bool(vf_schema, "allow-promisc", 7895 IOV_SCHEMA_HASDEFAULT, FALSE); 7896 pci_iov_schema_add_uint16(vf_schema, "num-queues", 7897 IOV_SCHEMA_HASDEFAULT, 1); 7898 7899 iov_error = pci_iov_attach(dev, pf_schema, 
vf_schema); 7900 7901 if (iov_error != 0) { 7902 ha->sriov_initialized = 0; 7903 } else { 7904 device_printf(dev, "SRIOV initialized\n"); 7905 ha->sriov_initialized = 1; 7906 } 7907 7908 return; 7909 } 7910 7911 static void 7912 qlnx_sriov_disable(qlnx_host_t *ha) 7913 { 7914 struct ecore_dev *cdev; 7915 int i, j; 7916 7917 cdev = &ha->cdev; 7918 7919 ecore_iov_set_vfs_to_disable(cdev, true); 7920 7921 for_each_hwfn(cdev, i) { 7922 struct ecore_hwfn *hwfn = &cdev->hwfns[i]; 7923 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 7924 7925 if (!ptt) { 7926 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 7927 return; 7928 } 7929 /* Clean WFQ db and configure equal weight for all vports */ 7930 ecore_clean_wfq_db(hwfn, ptt); 7931 7932 ecore_for_each_vf(hwfn, j) { 7933 int k = 0; 7934 7935 if (!ecore_iov_is_valid_vfid(hwfn, j, true, false)) 7936 continue; 7937 7938 if (ecore_iov_is_vf_started(hwfn, j)) { 7939 /* Wait until VF is disabled before releasing */ 7940 7941 for (k = 0; k < 100; k++) { 7942 if (!ecore_iov_is_vf_stopped(hwfn, j)) { 7943 qlnx_mdelay(__func__, 10); 7944 } else 7945 break; 7946 } 7947 } 7948 7949 if (k < 100) 7950 ecore_iov_release_hw_for_vf(&cdev->hwfns[i], 7951 ptt, j); 7952 else { 7953 QL_DPRINT1(ha, 7954 "Timeout waiting for VF's FLR to end\n"); 7955 } 7956 } 7957 ecore_ptt_release(hwfn, ptt); 7958 } 7959 7960 ecore_iov_set_vfs_to_disable(cdev, false); 7961 7962 return; 7963 } 7964 7965 static void 7966 qlnx_sriov_enable_qid_config(struct ecore_hwfn *hwfn, u16 vfid, 7967 struct ecore_iov_vf_init_params *params) 7968 { 7969 u16 base, i; 7970 7971 /* Since we have an equal resource distribution per-VF, and we assume 7972 * PF has acquired the ECORE_PF_L2_QUE first queues, we start setting 7973 * sequentially from there. 7974 */ 7975 base = FEAT_NUM(hwfn, ECORE_PF_L2_QUE) + vfid * params->num_queues; 7976 7977 params->rel_vf_id = vfid; 7978 7979 for (i = 0; i < params->num_queues; i++) { 7980 params->req_rx_queue[i] = base + i; 7981 params->req_tx_queue[i] = base + i; 7982 } 7983 7984 /* PF uses indices 0 for itself; Set vport/RSS afterwards */ 7985 params->vport_id = vfid + 1; 7986 params->rss_eng_id = vfid + 1; 7987 7988 return; 7989 } 7990 7991 static int 7992 qlnx_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *nvlist_params) 7993 { 7994 qlnx_host_t *ha; 7995 struct ecore_dev *cdev; 7996 struct ecore_iov_vf_init_params params; 7997 int ret, j, i; 7998 uint32_t max_vfs; 7999 8000 if ((ha = device_get_softc(dev)) == NULL) { 8001 device_printf(dev, "%s: cannot get softc\n", __func__); 8002 return (-1); 8003 } 8004 8005 if (qlnx_create_pf_taskqueues(ha) != 0) 8006 goto qlnx_iov_init_err0; 8007 8008 cdev = &ha->cdev; 8009 8010 max_vfs = RESC_NUM(&cdev->hwfns[0], ECORE_VPORT); 8011 8012 QL_DPRINT2(ha," dev = %p enter num_vfs = %d max_vfs = %d\n", 8013 dev, num_vfs, max_vfs); 8014 8015 if (num_vfs >= max_vfs) { 8016 QL_DPRINT1(ha, "Can start at most %d VFs\n", 8017 (RESC_NUM(&cdev->hwfns[0], ECORE_VPORT) - 1)); 8018 goto qlnx_iov_init_err0; 8019 } 8020 8021 ha->vf_attr = malloc(((sizeof (qlnx_vf_attr_t) * num_vfs)), M_QLNXBUF, 8022 M_NOWAIT); 8023 8024 if (ha->vf_attr == NULL) 8025 goto qlnx_iov_init_err0; 8026 8027 memset(¶ms, 0, sizeof(params)); 8028 8029 /* Initialize HW for VF access */ 8030 for_each_hwfn(cdev, j) { 8031 struct ecore_hwfn *hwfn = &cdev->hwfns[j]; 8032 struct ecore_ptt *ptt = ecore_ptt_acquire(hwfn); 8033 8034 /* Make sure not to use more than 16 queues per VF */ 8035 params.num_queues = min_t(int, 8036 (FEAT_NUM(hwfn, ECORE_VF_L2_QUE) / num_vfs), 8037 
16); 8038 8039 if (!ptt) { 8040 QL_DPRINT1(ha, "Failed to acquire ptt\n"); 8041 goto qlnx_iov_init_err1; 8042 } 8043 8044 for (i = 0; i < num_vfs; i++) { 8045 if (!ecore_iov_is_valid_vfid(hwfn, i, false, true)) 8046 continue; 8047 8048 qlnx_sriov_enable_qid_config(hwfn, i, ¶ms); 8049 8050 ret = ecore_iov_init_hw_for_vf(hwfn, ptt, ¶ms); 8051 8052 if (ret) { 8053 QL_DPRINT1(ha, "Failed to enable VF[%d]\n", i); 8054 ecore_ptt_release(hwfn, ptt); 8055 goto qlnx_iov_init_err1; 8056 } 8057 } 8058 8059 ecore_ptt_release(hwfn, ptt); 8060 } 8061 8062 ha->num_vfs = num_vfs; 8063 qlnx_inform_vf_link_state(&cdev->hwfns[0], ha); 8064 8065 QL_DPRINT2(ha," dev = %p exit num_vfs = %d\n", dev, num_vfs); 8066 8067 return (0); 8068 8069 qlnx_iov_init_err1: 8070 qlnx_sriov_disable(ha); 8071 8072 qlnx_iov_init_err0: 8073 qlnx_destroy_pf_taskqueues(ha); 8074 ha->num_vfs = 0; 8075 8076 return (-1); 8077 } 8078 8079 static void 8080 qlnx_iov_uninit(device_t dev) 8081 { 8082 qlnx_host_t *ha; 8083 8084 if ((ha = device_get_softc(dev)) == NULL) { 8085 device_printf(dev, "%s: cannot get softc\n", __func__); 8086 return; 8087 } 8088 8089 QL_DPRINT2(ha," dev = %p enter\n", dev); 8090 8091 qlnx_sriov_disable(ha); 8092 qlnx_destroy_pf_taskqueues(ha); 8093 8094 free(ha->vf_attr, M_QLNXBUF); 8095 ha->vf_attr = NULL; 8096 8097 ha->num_vfs = 0; 8098 8099 QL_DPRINT2(ha," dev = %p exit\n", dev); 8100 return; 8101 } 8102 8103 static int 8104 qlnx_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 8105 { 8106 qlnx_host_t *ha; 8107 qlnx_vf_attr_t *vf_attr; 8108 unsigned const char *mac; 8109 size_t size; 8110 struct ecore_hwfn *p_hwfn; 8111 8112 if ((ha = device_get_softc(dev)) == NULL) { 8113 device_printf(dev, "%s: cannot get softc\n", __func__); 8114 return (-1); 8115 } 8116 8117 QL_DPRINT2(ha," dev = %p enter vfnum = %d\n", dev, vfnum); 8118 8119 if (vfnum > (ha->num_vfs - 1)) { 8120 QL_DPRINT1(ha, " VF[%d] is greater than max allowed [%d]\n", 8121 vfnum, (ha->num_vfs - 1)); 8122 } 8123 8124 vf_attr = &ha->vf_attr[vfnum]; 8125 8126 if (nvlist_exists_binary(params, "mac-addr")) { 8127 mac = nvlist_get_binary(params, "mac-addr", &size); 8128 bcopy(mac, vf_attr->mac_addr, ETHER_ADDR_LEN); 8129 device_printf(dev, 8130 "%s: mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n", 8131 __func__, vf_attr->mac_addr[0], 8132 vf_attr->mac_addr[1], vf_attr->mac_addr[2], 8133 vf_attr->mac_addr[3], vf_attr->mac_addr[4], 8134 vf_attr->mac_addr[5]); 8135 p_hwfn = &ha->cdev.hwfns[0]; 8136 ecore_iov_bulletin_set_mac(p_hwfn, vf_attr->mac_addr, 8137 vfnum); 8138 } 8139 8140 QL_DPRINT2(ha," dev = %p exit vfnum = %d\n", dev, vfnum); 8141 return (0); 8142 } 8143 8144 static void 8145 qlnx_handle_vf_msg(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8146 { 8147 uint64_t events[ECORE_VF_ARRAY_LENGTH]; 8148 struct ecore_ptt *ptt; 8149 int i; 8150 8151 ptt = ecore_ptt_acquire(p_hwfn); 8152 if (!ptt) { 8153 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8154 __qlnx_pf_vf_msg(p_hwfn, 0); 8155 return; 8156 } 8157 8158 ecore_iov_pf_get_pending_events(p_hwfn, events); 8159 8160 QL_DPRINT2(ha, "Event mask of VF events:" 8161 "0x%" PRIu64 "0x%" PRIu64 " 0x%" PRIu64 "\n", 8162 events[0], events[1], events[2]); 8163 8164 ecore_for_each_vf(p_hwfn, i) { 8165 /* Skip VFs with no pending messages */ 8166 if (!(events[i / 64] & (1ULL << (i % 64)))) 8167 continue; 8168 8169 QL_DPRINT2(ha, 8170 "Handling VF message from VF 0x%02x [Abs 0x%02x]\n", 8171 i, p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i); 8172 8173 /* Copy VF's message to PF's request buffer for 
that VF */ 8174 if (ecore_iov_copy_vf_msg(p_hwfn, ptt, i)) 8175 continue; 8176 8177 ecore_iov_process_mbx_req(p_hwfn, ptt, i); 8178 } 8179 8180 ecore_ptt_release(p_hwfn, ptt); 8181 8182 return; 8183 } 8184 8185 static void 8186 qlnx_handle_vf_flr_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8187 { 8188 struct ecore_ptt *ptt; 8189 int ret; 8190 8191 ptt = ecore_ptt_acquire(p_hwfn); 8192 8193 if (!ptt) { 8194 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8195 __qlnx_vf_flr_update(p_hwfn); 8196 return; 8197 } 8198 8199 ret = ecore_iov_vf_flr_cleanup(p_hwfn, ptt); 8200 8201 if (ret) { 8202 QL_DPRINT1(ha, "ecore_iov_vf_flr_cleanup failed; re-scheduling\n"); 8203 } 8204 8205 ecore_ptt_release(p_hwfn, ptt); 8206 8207 return; 8208 } 8209 8210 static void 8211 qlnx_handle_bulletin_update(qlnx_host_t *ha, struct ecore_hwfn *p_hwfn) 8212 { 8213 struct ecore_ptt *ptt; 8214 int i; 8215 8216 ptt = ecore_ptt_acquire(p_hwfn); 8217 8218 if (!ptt) { 8219 QL_DPRINT1(ha, "Can't acquire PTT; re-scheduling\n"); 8220 qlnx_vf_bulleting_update(p_hwfn); 8221 return; 8222 } 8223 8224 ecore_for_each_vf(p_hwfn, i) { 8225 QL_DPRINT1(ha, "ecore_iov_post_vf_bulletin[%p, %d]\n", 8226 p_hwfn, i); 8227 ecore_iov_post_vf_bulletin(p_hwfn, i, ptt); 8228 } 8229 8230 ecore_ptt_release(p_hwfn, ptt); 8231 8232 return; 8233 } 8234 8235 static void 8236 qlnx_pf_taskqueue(void *context, int pending) 8237 { 8238 struct ecore_hwfn *p_hwfn; 8239 qlnx_host_t *ha; 8240 int i; 8241 8242 p_hwfn = context; 8243 8244 if (p_hwfn == NULL) 8245 return; 8246 8247 ha = (qlnx_host_t *)(p_hwfn->p_dev); 8248 8249 if ((i = qlnx_find_hwfn_index(p_hwfn)) == -1) 8250 return; 8251 8252 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8253 QLNX_SRIOV_TASK_FLAGS_VF_PF_MSG)) 8254 qlnx_handle_vf_msg(ha, p_hwfn); 8255 8256 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8257 QLNX_SRIOV_TASK_FLAGS_VF_FLR_UPDATE)) 8258 qlnx_handle_vf_flr_update(ha, p_hwfn); 8259 8260 if (atomic_testandclear_32(&ha->sriov_task[i].flags, 8261 QLNX_SRIOV_TASK_FLAGS_BULLETIN_UPDATE)) 8262 qlnx_handle_bulletin_update(ha, p_hwfn); 8263 8264 return; 8265 } 8266 8267 static int 8268 qlnx_create_pf_taskqueues(qlnx_host_t *ha) 8269 { 8270 int i; 8271 uint8_t tq_name[32]; 8272 8273 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8274 struct ecore_hwfn *p_hwfn = &ha->cdev.hwfns[i]; 8275 8276 bzero(tq_name, sizeof (tq_name)); 8277 snprintf(tq_name, sizeof (tq_name), "ql_pf_tq_%d", i); 8278 8279 TASK_INIT(&ha->sriov_task[i].pf_task, 0, qlnx_pf_taskqueue, p_hwfn); 8280 8281 ha->sriov_task[i].pf_taskqueue = taskqueue_create(tq_name, M_NOWAIT, 8282 taskqueue_thread_enqueue, 8283 &ha->sriov_task[i].pf_taskqueue); 8284 8285 if (ha->sriov_task[i].pf_taskqueue == NULL) 8286 return (-1); 8287 8288 taskqueue_start_threads(&ha->sriov_task[i].pf_taskqueue, 1, 8289 PI_NET, "%s", tq_name); 8290 8291 QL_DPRINT1(ha, "%p\n", ha->sriov_task[i].pf_taskqueue); 8292 } 8293 8294 return (0); 8295 } 8296 8297 static void 8298 qlnx_destroy_pf_taskqueues(qlnx_host_t *ha) 8299 { 8300 int i; 8301 8302 for (i = 0; i < ha->cdev.num_hwfns; i++) { 8303 if (ha->sriov_task[i].pf_taskqueue != NULL) { 8304 taskqueue_drain(ha->sriov_task[i].pf_taskqueue, 8305 &ha->sriov_task[i].pf_task); 8306 taskqueue_free(ha->sriov_task[i].pf_taskqueue); 8307 ha->sriov_task[i].pf_taskqueue = NULL; 8308 } 8309 } 8310 return; 8311 } 8312 8313 static void 8314 qlnx_inform_vf_link_state(struct ecore_hwfn *p_hwfn, qlnx_host_t *ha) 8315 { 8316 struct ecore_mcp_link_capabilities caps; 8317 struct ecore_mcp_link_params params; 8318 
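	/*
	 * A snapshot of the PF's current link parameters, state and
	 * capabilities is taken below and copied into the bulletin of every
	 * possible VF; when the link is up on a CMT (2 hwfn) device the
	 * advertised speed is forced to 100G (100000 Mb/s).
	 */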
struct ecore_mcp_link_state link; 8319 int i; 8320 8321 if (!p_hwfn->pf_iov_info) 8322 return; 8323 8324 memset(¶ms, 0, sizeof(struct ecore_mcp_link_params)); 8325 memset(&link, 0, sizeof(struct ecore_mcp_link_state)); 8326 memset(&caps, 0, sizeof(struct ecore_mcp_link_capabilities)); 8327 8328 memcpy(&caps, ecore_mcp_get_link_capabilities(p_hwfn), sizeof(caps)); 8329 memcpy(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link)); 8330 memcpy(¶ms, ecore_mcp_get_link_params(p_hwfn), sizeof(params)); 8331 8332 QL_DPRINT2(ha, "called\n"); 8333 8334 /* Update bulletin of all future possible VFs with link configuration */ 8335 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) { 8336 /* Modify link according to the VF's configured link state */ 8337 8338 link.link_up = false; 8339 8340 if (ha->link_up) { 8341 link.link_up = true; 8342 /* Set speed according to maximum supported by HW. 8343 * that is 40G for regular devices and 100G for CMT 8344 * mode devices. 8345 */ 8346 link.speed = (p_hwfn->p_dev->num_hwfns > 1) ? 8347 100000 : link.speed; 8348 } 8349 QL_DPRINT2(ha, "link [%d] = %d\n", i, link.link_up); 8350 ecore_iov_set_link(p_hwfn, i, ¶ms, &link, &caps); 8351 } 8352 8353 qlnx_vf_bulleting_update(p_hwfn); 8354 8355 return; 8356 } 8357 #endif /* #ifndef QLNX_VF */ 8358 #endif /* #ifdef CONFIG_ECORE_SRIOV */ 8359