1 /* 2 * BSD LICENSE 3 * 4 * Copyright(c) 2017 Cavium, Inc.. All rights reserved. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Cavium, Inc. nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "lio_bsd.h"
#include "lio_common.h"

#include "lio_droq.h"
#include "lio_iq.h"
#include "lio_response_manager.h"
#include "lio_device.h"
#include "lio_ctrl.h"
#include "lio_main.h"
#include "lio_network.h"
#include "cn23xx_pf_device.h"
#include "lio_image.h"
#include "lio_ioctl.h"
#include "lio_rxtx.h"
#include "lio_rss.h"

/* Number of milliseconds to wait for DDR initialization */
#define LIO_DDR_TIMEOUT 10000
#define LIO_MAX_FW_TYPE_LEN 8

/* Firmware flavor selected via the "hw.lio.fw_type" loader tunable. */
static char fw_type[LIO_MAX_FW_TYPE_LEN];
TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type));

/*
 * Integers that specify number of queues per PF.
 * Valid range is 0 to 64.
 * Use 0 to derive from CPU count.
 */
static int num_queues_per_pf0;
static int num_queues_per_pf1;
TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0);
TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1);

#ifdef RSS
/* Receive-side scaling enable; only compiled when the kernel has RSS. */
static int lio_rss = 1;
TUNABLE_INT("hw.lio.rss", &lio_rss);
#endif	/* RSS */

/* Hardware LRO */
unsigned int lio_hwlro = 0;
TUNABLE_INT("hw.lio.hwlro", &lio_hwlro);

/*
 * Bitmask indicating which consoles have debug
 * output redirected to syslog.
 */
static unsigned long console_bitmask;
TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask);

/*
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
int
lio_console_debug_enabled(uint32_t console)
{

	/* Test the per-console bit of the tunable bitmask. */
	return (console_bitmask >> (console)) & 0x1;
}

/* Forward declarations for this driver's internal helpers. */
static int	lio_detach(device_t dev);

static int	lio_device_init(struct octeon_device *octeon_dev);
static int	lio_chip_specific_setup(struct octeon_device *oct);
static void	lio_watchdog(void *param);
static int	lio_load_firmware(struct octeon_device *oct);
static int	lio_nic_starter(struct octeon_device *oct);
static int	lio_init_nic_module(struct octeon_device *oct);
static int	lio_setup_nic_devices(struct octeon_device *octeon_dev);
static int	lio_link_info(struct lio_recv_info *recv_info, void *ptr);
static void	lio_if_cfg_callback(struct octeon_device *oct, uint32_t status,
				    void *buf);
static int	lio_set_rxcsum_command(if_t ifp, int command,
				       uint8_t rx_cmd);
static int	lio_setup_glists(struct octeon_device *oct, struct lio *lio,
				 int num_iqs);
static void	lio_destroy_nic_device(struct octeon_device *oct, int ifidx);
static inline void lio_update_link_status(if_t ifp,
					  union octeon_link_status *ls);
static void	lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop);
static int	lio_stop_nic_module(struct octeon_device *oct);
static void	lio_destroy_resources(struct octeon_device *oct);
static int	lio_setup_rx_oom_poll_fn(if_t ifp);

static void	lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid);
static void	lio_vlan_rx_kill_vid(void *arg, if_t ifp,
				     uint16_t vid);
static struct octeon_device *
	lio_get_other_octeon_device(struct octeon_device *oct);

static int	lio_wait_for_oq_pkts(struct octeon_device *oct);

int	lio_send_rss_param(struct lio *lio);
static int	lio_dbg_console_print(struct octeon_device *oct,
				      uint32_t console_num, char *prefix,
				      char *suffix);

/* Polling interval for determining when NIC application is alive */
#define LIO_STARTER_POLL_INTERVAL_MS 100

/*
 *
vendor_info_array. 137 * This array contains the list of IDs on which the driver should load. 138 */ 139 struct lio_vendor_info { 140 uint16_t vendor_id; 141 uint16_t device_id; 142 uint16_t subdevice_id; 143 uint8_t revision_id; 144 uint8_t index; 145 }; 146 147 static struct lio_vendor_info lio_pci_tbl[] = { 148 /* CN2350 10G */ 149 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE, 150 0x02, 0}, 151 152 /* CN2350 10G */ 153 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1, 154 0x02, 0}, 155 156 /* CN2360 10G */ 157 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE, 158 0x02, 1}, 159 160 /* CN2350 25G */ 161 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE, 162 0x02, 2}, 163 164 /* CN2360 25G */ 165 {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE, 166 0x02, 3}, 167 168 {0, 0, 0, 0, 0} 169 }; 170 171 static char *lio_strings[] = { 172 "LiquidIO 2350 10GbE Server Adapter", 173 "LiquidIO 2360 10GbE Server Adapter", 174 "LiquidIO 2350 25GbE Server Adapter", 175 "LiquidIO 2360 25GbE Server Adapter", 176 }; 177 178 struct lio_if_cfg_resp { 179 uint64_t rh; 180 struct octeon_if_cfg_info cfg_info; 181 uint64_t status; 182 }; 183 184 struct lio_if_cfg_context { 185 int octeon_id; 186 volatile int cond; 187 }; 188 189 struct lio_rx_ctl_context { 190 int octeon_id; 191 volatile int cond; 192 }; 193 194 static int 195 lio_probe(device_t dev) 196 { 197 struct lio_vendor_info *tbl; 198 199 uint16_t vendor_id; 200 uint16_t device_id; 201 uint16_t subdevice_id; 202 uint8_t revision_id; 203 char device_ver[256]; 204 205 vendor_id = pci_get_vendor(dev); 206 if (vendor_id != PCI_VENDOR_ID_CAVIUM) 207 return (ENXIO); 208 209 device_id = pci_get_device(dev); 210 subdevice_id = pci_get_subdevice(dev); 211 revision_id = pci_get_revid(dev); 212 213 tbl = lio_pci_tbl; 214 while (tbl->vendor_id) { 215 if ((vendor_id == tbl->vendor_id) && 216 (device_id == tbl->device_id) && 217 (subdevice_id == 
tbl->subdevice_id) && 218 (revision_id == tbl->revision_id)) { 219 sprintf(device_ver, "%s, Version - %s", 220 lio_strings[tbl->index], LIO_VERSION); 221 device_set_desc_copy(dev, device_ver); 222 return (BUS_PROBE_DEFAULT); 223 } 224 225 tbl++; 226 } 227 228 return (ENXIO); 229 } 230 231 static int 232 lio_attach(device_t device) 233 { 234 struct octeon_device *oct_dev = NULL; 235 uint64_t scratch1; 236 uint32_t error; 237 int timeout, ret = 1; 238 uint8_t bus, dev, function; 239 240 oct_dev = lio_allocate_device(device); 241 if (oct_dev == NULL) { 242 device_printf(device, "Error: Unable to allocate device\n"); 243 return (-ENOMEM); 244 } 245 246 oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET; 247 oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET; 248 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; 249 250 oct_dev->device = device; 251 bus = pci_get_bus(device); 252 dev = pci_get_slot(device); 253 function = pci_get_function(device); 254 255 lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n", 256 pci_get_vendor(device), pci_get_device(device), bus, dev, 257 function); 258 259 if (lio_device_init(oct_dev)) { 260 lio_dev_err(oct_dev, "Failed to init device\n"); 261 lio_detach(device); 262 return (-ENOMEM); 263 } 264 265 scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); 266 if (!(scratch1 & 4ULL)) { 267 /* 268 * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that 269 * the lio watchdog kernel thread is running for this 270 * NIC. Each NIC gets one watchdog kernel thread. 
271 */ 272 scratch1 |= 4ULL; 273 lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); 274 275 error = kproc_create(lio_watchdog, oct_dev, 276 &oct_dev->watchdog_task, 0, 0, 277 "liowd/%02hhx:%02hhx.%hhx", bus, 278 dev, function); 279 if (!error) { 280 kproc_resume(oct_dev->watchdog_task); 281 } else { 282 oct_dev->watchdog_task = NULL; 283 lio_dev_err(oct_dev, 284 "failed to create kernel_thread\n"); 285 lio_detach(device); 286 return (-1); 287 } 288 } 289 oct_dev->rx_pause = 1; 290 oct_dev->tx_pause = 1; 291 292 timeout = 0; 293 while (timeout < LIO_NIC_STARTER_TIMEOUT) { 294 lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS); 295 timeout += LIO_STARTER_POLL_INTERVAL_MS; 296 297 /* 298 * During the boot process interrupts are not available. 299 * So polling for first control message from FW. 300 */ 301 if (cold) 302 lio_droq_bh(oct_dev->droq[0], 0); 303 304 if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) { 305 ret = lio_nic_starter(oct_dev); 306 break; 307 } 308 } 309 310 if (ret) { 311 lio_dev_err(oct_dev, "Firmware failed to start\n"); 312 lio_detach(device); 313 return (-EIO); 314 } 315 316 lio_dev_dbg(oct_dev, "Device is ready\n"); 317 318 return (0); 319 } 320 321 static int 322 lio_detach(device_t dev) 323 { 324 struct octeon_device *oct_dev = device_get_softc(dev); 325 326 lio_dev_dbg(oct_dev, "Stopping device\n"); 327 if (oct_dev->watchdog_task) { 328 uint64_t scratch1; 329 330 kproc_suspend(oct_dev->watchdog_task, 0); 331 332 scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); 333 scratch1 &= ~4ULL; 334 lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); 335 } 336 337 if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP)) 338 lio_stop_nic_module(oct_dev); 339 340 /* 341 * Reset the octeon device and cleanup all memory allocated for 342 * the octeon device by driver. 343 */ 344 lio_destroy_resources(oct_dev); 345 346 lio_dev_info(oct_dev, "Device removed\n"); 347 348 /* 349 * This octeon device has been removed. 
Update the global 350 * data structure to reflect this. Free the device structure. 351 */ 352 lio_free_device_mem(oct_dev); 353 return (0); 354 } 355 356 static int 357 lio_shutdown(device_t dev) 358 { 359 struct octeon_device *oct_dev = device_get_softc(dev); 360 struct lio *lio = if_getsoftc(oct_dev->props.ifp); 361 362 lio_send_rx_ctrl_cmd(lio, 0); 363 364 return (0); 365 } 366 367 static int 368 lio_suspend(device_t dev) 369 { 370 371 return (ENXIO); 372 } 373 374 static int 375 lio_resume(device_t dev) 376 { 377 378 return (ENXIO); 379 } 380 381 static int 382 lio_event(struct module *mod, int event, void *junk) 383 { 384 385 switch (event) { 386 case MOD_LOAD: 387 lio_init_device_list(LIO_CFG_TYPE_DEFAULT); 388 break; 389 default: 390 break; 391 } 392 393 return (0); 394 } 395 396 /********************************************************************* 397 * FreeBSD Device Interface Entry Points 398 * *******************************************************************/ 399 static device_method_t lio_methods[] = { 400 /* Device interface */ 401 DEVMETHOD(device_probe, lio_probe), 402 DEVMETHOD(device_attach, lio_attach), 403 DEVMETHOD(device_detach, lio_detach), 404 DEVMETHOD(device_shutdown, lio_shutdown), 405 DEVMETHOD(device_suspend, lio_suspend), 406 DEVMETHOD(device_resume, lio_resume), 407 DEVMETHOD_END 408 }; 409 410 static driver_t lio_driver = { 411 LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device), 412 }; 413 414 DRIVER_MODULE(lio, pci, lio_driver, lio_event, NULL); 415 416 MODULE_DEPEND(lio, pci, 1, 1, 1); 417 MODULE_DEPEND(lio, ether, 1, 1, 1); 418 MODULE_DEPEND(lio, firmware, 1, 1, 1); 419 420 static bool 421 fw_type_is_none(void) 422 { 423 return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, 424 sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; 425 } 426 427 /* 428 * \brief Device initialization for each Octeon device that is probed 429 * @param octeon_dev octeon device 430 */ 431 static int 432 lio_device_init(struct octeon_device *octeon_dev) 433 { 434 
unsigned long ddr_timeout = LIO_DDR_TIMEOUT; 435 char *dbg_enb = NULL; 436 int fw_loaded = 0; 437 int i, j, ret; 438 uint8_t bus, dev, function; 439 char bootcmd[] = "\n"; 440 441 bus = pci_get_bus(octeon_dev->device); 442 dev = pci_get_slot(octeon_dev->device); 443 function = pci_get_function(octeon_dev->device); 444 445 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE); 446 447 /* Enable access to the octeon device */ 448 if (pci_enable_busmaster(octeon_dev->device)) { 449 lio_dev_err(octeon_dev, "pci_enable_device failed\n"); 450 return (1); 451 } 452 453 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE); 454 455 /* Identify the Octeon type and map the BAR address space. */ 456 if (lio_chip_specific_setup(octeon_dev)) { 457 lio_dev_err(octeon_dev, "Chip specific setup failed\n"); 458 return (1); 459 } 460 461 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE); 462 463 /* 464 * Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', 465 * since that is what is required for the reference to be removed 466 * during de-initialization (see 'octeon_destroy_resources'). 467 */ 468 lio_register_device(octeon_dev, bus, dev, function, true); 469 470 471 octeon_dev->app_mode = LIO_DRV_INVALID_APP; 472 473 if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) { 474 fw_loaded = 0; 475 /* Do a soft reset of the Octeon device. */ 476 if (octeon_dev->fn_list.soft_reset(octeon_dev)) 477 return (1); 478 479 /* things might have changed */ 480 if (!lio_cn23xx_pf_fw_loaded(octeon_dev)) 481 fw_loaded = 0; 482 else 483 fw_loaded = 1; 484 } else { 485 fw_loaded = 1; 486 } 487 488 /* 489 * Initialize the dispatch mechanism used to push packets arriving on 490 * Octeon Output queues. 
491 */ 492 if (lio_init_dispatch_list(octeon_dev)) 493 return (1); 494 495 lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, 496 LIO_OPCODE_NIC_CORE_DRV_ACTIVE, 497 lio_core_drv_init, octeon_dev); 498 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE); 499 500 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); 501 if (ret) { 502 lio_dev_err(octeon_dev, 503 "Failed to configure device registers\n"); 504 return (ret); 505 } 506 507 /* Initialize soft command buffer pool */ 508 if (lio_setup_sc_buffer_pool(octeon_dev)) { 509 lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n"); 510 return (1); 511 } 512 513 atomic_store_rel_int(&octeon_dev->status, 514 LIO_DEV_SC_BUFF_POOL_INIT_DONE); 515 516 if (lio_allocate_ioq_vector(octeon_dev)) { 517 lio_dev_err(octeon_dev, 518 "IOQ vector allocation failed\n"); 519 return (1); 520 } 521 522 atomic_store_rel_int(&octeon_dev->status, 523 LIO_DEV_MSIX_ALLOC_VECTOR_DONE); 524 525 for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { 526 octeon_dev->instr_queue[i] = 527 malloc(sizeof(struct lio_instr_queue), 528 M_DEVBUF, M_NOWAIT | M_ZERO); 529 if (octeon_dev->instr_queue[i] == NULL) 530 return (1); 531 } 532 533 /* Setup the data structures that manage this Octeon's Input queues. */ 534 if (lio_setup_instr_queue0(octeon_dev)) { 535 lio_dev_err(octeon_dev, 536 "Instruction queue initialization failed\n"); 537 return (1); 538 } 539 540 atomic_store_rel_int(&octeon_dev->status, 541 LIO_DEV_INSTR_QUEUE_INIT_DONE); 542 543 /* 544 * Initialize lists to manage the requests of different types that 545 * arrive from user & kernel applications for this octeon device. 
546 */ 547 548 if (lio_setup_response_list(octeon_dev)) { 549 lio_dev_err(octeon_dev, "Response list allocation failed\n"); 550 return (1); 551 } 552 553 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE); 554 555 for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { 556 octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]), 557 M_DEVBUF, M_NOWAIT | M_ZERO); 558 if (octeon_dev->droq[i] == NULL) 559 return (1); 560 } 561 562 if (lio_setup_output_queue0(octeon_dev)) { 563 lio_dev_err(octeon_dev, "Output queue initialization failed\n"); 564 return (1); 565 } 566 567 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE); 568 569 /* 570 * Setup the interrupt handler and record the INT SUM register address 571 */ 572 if (lio_setup_interrupt(octeon_dev, 573 octeon_dev->sriov_info.num_pf_rings)) 574 return (1); 575 576 /* Enable Octeon device interrupts */ 577 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); 578 579 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE); 580 581 /* 582 * Send Credit for Octeon Output queues. Credits are always sent BEFORE 583 * the output queue is enabled. 584 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in 585 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 586 * Otherwise, it is possible that the DRV_ACTIVE message will be sent 587 * before any credits have been issued, causing the ring to be reset 588 * (and the f/w appear to never have started). 
589 */ 590 for (j = 0; j < octeon_dev->num_oqs; j++) 591 lio_write_csr32(octeon_dev, 592 octeon_dev->droq[j]->pkts_credit_reg, 593 octeon_dev->droq[j]->max_count); 594 595 /* Enable the input and output queues for this Octeon device */ 596 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); 597 if (ret) { 598 lio_dev_err(octeon_dev, "Failed to enable input/output queues"); 599 return (ret); 600 } 601 602 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE); 603 604 if (!fw_loaded) { 605 lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n"); 606 if (!ddr_timeout) { 607 lio_dev_info(octeon_dev, 608 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); 609 } 610 611 lio_sleep_timeout(LIO_RESET_MSECS); 612 613 /* 614 * Wait for the octeon to initialize DDR after the 615 * soft-reset. 616 */ 617 while (!ddr_timeout) { 618 if (pause("-", lio_ms_to_ticks(100))) { 619 /* user probably pressed Control-C */ 620 return (1); 621 } 622 } 623 624 ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout); 625 if (ret) { 626 lio_dev_err(octeon_dev, 627 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", 628 ret); 629 return (1); 630 } 631 632 if (lio_wait_for_bootloader(octeon_dev, 1100)) { 633 lio_dev_err(octeon_dev, "Board not responding\n"); 634 return (1); 635 } 636 637 /* Divert uboot to take commands from host instead. */ 638 ret = lio_console_send_cmd(octeon_dev, bootcmd, 50); 639 640 lio_dev_dbg(octeon_dev, "Initializing consoles\n"); 641 ret = lio_init_consoles(octeon_dev); 642 if (ret) { 643 lio_dev_err(octeon_dev, "Could not access board consoles\n"); 644 return (1); 645 } 646 647 /* 648 * If console debug enabled, specify empty string to 649 * use default enablement ELSE specify NULL string for 650 * 'disabled'. 651 */ 652 dbg_enb = lio_console_debug_enabled(0) ? 
"" : NULL; 653 ret = lio_add_console(octeon_dev, 0, dbg_enb); 654 655 if (ret) { 656 lio_dev_err(octeon_dev, "Could not access board console\n"); 657 return (1); 658 } else if (lio_console_debug_enabled(0)) { 659 /* 660 * If console was added AND we're logging console output 661 * then set our console print function. 662 */ 663 octeon_dev->console[0].print = lio_dbg_console_print; 664 } 665 666 atomic_store_rel_int(&octeon_dev->status, 667 LIO_DEV_CONSOLE_INIT_DONE); 668 669 lio_dev_dbg(octeon_dev, "Loading firmware\n"); 670 671 ret = lio_load_firmware(octeon_dev); 672 if (ret) { 673 lio_dev_err(octeon_dev, "Could not load firmware to board\n"); 674 return (1); 675 } 676 } 677 678 atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK); 679 680 return (0); 681 } 682 683 /* 684 * \brief PCI FLR for each Octeon device. 685 * @param oct octeon device 686 */ 687 static void 688 lio_pci_flr(struct octeon_device *oct) 689 { 690 uint32_t exppos, status; 691 692 pci_find_cap(oct->device, PCIY_EXPRESS, &exppos); 693 694 pci_save_state(oct->device); 695 696 /* Quiesce the device completely */ 697 pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2); 698 699 /* Wait for Transaction Pending bit clean */ 700 lio_mdelay(100); 701 702 status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); 703 if (status & PCIEM_STA_TRANSACTION_PND) { 704 lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n"); 705 lio_mdelay(5); 706 707 status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); 708 if (status & PCIEM_STA_TRANSACTION_PND) 709 lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n"); 710 } 711 712 pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2); 713 lio_mdelay(100); 714 715 pci_restore_state(oct->device); 716 } 717 718 /* 719 * \brief Debug console print function 720 * @param octeon_dev octeon device 721 * @param console_num console number 722 * @param prefix 
first portion of line to display
 * @param suffix second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
 */
static int
lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num,
		      char *prefix, char *suffix)
{

	/* Emit whichever of prefix/suffix is present on one log line. */
	if (prefix != NULL && suffix != NULL)
		lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix);
	else if (prefix != NULL)
		lio_dev_info(oct, "%u: %s\n", console_num, prefix);
	else if (suffix != NULL)
		lio_dev_info(oct, "%u: %s\n", console_num, suffix);

	return (0);
}

/*
 * \brief Per-NIC watchdog kthread body: every two seconds read
 * SLI_SCRATCH2 for a mask of crashed/stuck cores and log each newly
 * crashed core once.
 * @param param the octeon device this watchdog monitors
 */
static void
lio_watchdog(void *param)
{
	int	core_num;
	uint16_t mask_of_crashed_or_stuck_cores = 0;
	struct octeon_device *oct = param;
	/*
	 * NOTE(review): fixed size 12 while the loop below runs to
	 * LIO_MAX_CORES -- confirm LIO_MAX_CORES <= 12 or the indexing
	 * overruns this array.
	 */
	bool	err_msg_was_printed[12];

	bzero(err_msg_was_printed, sizeof(err_msg_was_printed));

	while (1) {
		/* Honor kproc_suspend() requests from lio_detach(). */
		kproc_suspend_check(oct->watchdog_task);
		mask_of_crashed_or_stuck_cores =
			(uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2);

		if (mask_of_crashed_or_stuck_cores) {
			struct octeon_device *other_oct;

			oct->cores_crashed = true;
			/* Flag the sibling PF on the same card, if any. */
			other_oct = lio_get_other_octeon_device(oct);
			if (other_oct != NULL)
				other_oct->cores_crashed = true;

			for (core_num = 0; core_num < LIO_MAX_CORES;
			     core_num++) {
				bool	core_crashed_or_got_stuck;

				core_crashed_or_got_stuck =
				    (mask_of_crashed_or_stuck_cores >>
				     core_num) & 1;
				if (core_crashed_or_got_stuck &&
				    !err_msg_was_printed[core_num]) {
					lio_dev_err(oct,
						    "ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
						    core_num);
					err_msg_was_printed[core_num] = true;
				}
			}

		}

		/* sleep for two seconds */
		pause("-", lio_ms_to_ticks(2000));
	}
}

/*
 * \brief Identify the chip from PCI config space and perform
 * chip-specific setup (currently CN23XX PF only); also validates the
 * per-PF queue-count tunables.
 * @param oct octeon device
 * @returns 0 on success, non-zero on failure or unknown chip
 */
static int
lio_chip_specific_setup(struct octeon_device *oct)
{
	char	*s;
	uint32_t dev_id;
	int	ret = 1;

	dev_id = lio_read_pci_cfg(oct, 0);
	oct->subdevice_id = pci_get_subdevice(oct->device);

	switch (dev_id) {
	case LIO_CN23XX_PF_PCIID:
		oct->chip_id = LIO_CN23XX_PF_VID;
		if (pci_get_function(oct->device) == 0) {
			if (num_queues_per_pf0 < 0) {
				lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n",
					     num_queues_per_pf0);
				num_queues_per_pf0 = 0;
			}

			oct->sriov_info.num_pf_rings = num_queues_per_pf0;
		} else {
			if (num_queues_per_pf1 < 0) {
				lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n",
					     num_queues_per_pf1);
				num_queues_per_pf1 = 0;
			}

			oct->sriov_info.num_pf_rings = num_queues_per_pf1;
		}

		ret = lio_cn23xx_pf_setup_device(oct);
		s = "CN23XX";
		break;

	default:
		s = "?";
		lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id);
	}

	if (!ret)
		lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s,
			     OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct),
			     lio_get_conf(oct)->card_name, LIO_VERSION);

	return (ret);
}

/*
 * \brief Find the other PF on the same physical card: the device with
 * the next octeon_id that shares this device's PCI bus and slot.
 * @param oct octeon device
 * @returns the sibling device, or NULL if there is none
 */
static struct octeon_device *
lio_get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if ((other_oct != NULL) && other_oct->device) {
		int	oct_busnum, other_oct_busnum;

		oct_busnum = pci_get_bus(oct->device);
		other_oct_busnum = pci_get_bus(other_oct->device);

		if (oct_busnum == other_oct_busnum) {
			int	oct_slot, other_oct_slot;

			oct_slot = pci_get_slot(oct->device);
			other_oct_slot = pci_get_slot(other_oct->device);

			if (oct_slot == other_oct_slot)
				return (other_oct);
		}
	}
	return (NULL);
}

/*
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int
lio_load_firmware(struct octeon_device *oct)
{
	const struct firmware *fw;
	char	*tmp_fw_type = NULL;
	int	ret = 0;
	char	fw_name[LIO_MAX_FW_FILENAME_LEN];

	/* Default to the NIC firmware flavor when the tunable is unset. */
	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME,
		lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX);

	fw = firmware_get(fw_name);
	if (fw == NULL) {
		lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n",
			    fw_name);
		return (EINVAL);
	}

	ret = lio_download_firmware(oct, fw->data, fw->datasize);

	firmware_put(fw, FIRMWARE_UNLOAD);

	return (ret);
}

/*
 * \brief Start the NIC application once the firmware reports CORE_OK;
 * initializes the network module when the firmware runs the NIC app.
 * @param oct octeon device
 * @returns 0 on success, -1 on failure
 */
static int
lio_nic_starter(struct octeon_device *oct)
{
	int	ret = 0;

	atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) {
		if (lio_init_nic_module(oct)) {
			lio_dev_err(oct, "NIC initialization failed\n");
			ret = -1;
/* NOTE(review): "ONiLY" looks like a typo for "ONLY" -- this guard can
 * never be enabled under the presumably intended macro name; confirm. */
#ifdef CAVIUM_ONiLY_23XX_VF
		} else {
			if (octeon_enable_sriov(oct) < 0)
				ret = -1;
#endif
		}
	} else {
		lio_dev_err(oct,
			    "Unexpected application running on NIC (%d). Check firmware.\n",
			    oct->app_mode);
		ret = -1;
	}

	return (ret);
}

/*
 * \brief Initialize the NIC module: set the interface count from the
 * configuration and create the network devices.
 * @param oct octeon device
 * @returns 0 on success, non-zero on failure (ifcount reset to 0)
 */
static int
lio_init_nic_module(struct octeon_device *oct)
{
	int	num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct));
	int	retval = 0;

	lio_dev_dbg(oct, "Initializing network interfaces\n");

	/*
	 * only default iq and oq were initialized
	 * initialize the rest as well
	 */

	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	bzero(&oct->props, sizeof(struct lio_if_props));

	oct->props.gmxport = -1;

	retval = lio_setup_nic_devices(oct);
	if (retval) {
		lio_dev_err(oct, "Setup NIC devices failed\n");
		goto lio_init_failure;
	}

	lio_dev_dbg(oct, "Network interfaces ready\n");

	return (retval);

lio_init_failure:

	oct->ifcount = 0;

	return (retval);
}

/*
 * \brief ifmedia change handler: only Ethernet with autoselect is
 * accepted; explicit media changes are rejected.
 * @param ifp the interface
 * @returns 0 if acceptable, EINVAL otherwise
 */
static int
lio_ifmedia_update(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct ifmedia *ifm;

	ifm = &lio->ifmedia;

	/* We only support Ethernet media type. */
	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	case IFM_10G_CX4:
	case IFM_10G_SR:
	case IFM_10G_T:
	case IFM_10G_TWINAX:
	default:
		/* We don't support changing the media type. */
		lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n",
			    IFM_SUBTYPE(ifm->ifm_media));
		return (EINVAL);
	}

	return (0);
}

/*
 * \brief Map the PCI subdevice ID to an ifmedia subtype (10G vs 25G SR);
 * defaults to 10G SR for unknown subdevices.
 */
static int
lio_get_media_subtype(struct octeon_device *oct)
{

	switch(oct->subdevice_id) {
	case LIO_CN2350_10G_SUBDEVICE:
	case LIO_CN2350_10G_SUBDEVICE1:
	case LIO_CN2360_10G_SUBDEVICE:
		return (IFM_10G_SR);

	case LIO_CN2350_25G_SUBDEVICE:
	case LIO_CN2360_25G_SUBDEVICE:
		return (IFM_25G_SR);
	}

	return (IFM_10G_SR);
}

/*
 * \brief Map the PCI subdevice ID to a link baudrate (10 or 25 Gbps);
 * defaults to 10 Gbps for unknown subdevices.
 */
static uint64_t
lio_get_baudrate(struct octeon_device *oct)
{

	switch(oct->subdevice_id) {
	case LIO_CN2350_10G_SUBDEVICE:
	case LIO_CN2350_10G_SUBDEVICE1:
	case LIO_CN2360_10G_SUBDEVICE:
		return (IF_Gbps(10));

	case LIO_CN2350_25G_SUBDEVICE:
	case LIO_CN2360_25G_SUBDEVICE:
		return (IF_Gbps(25));
	}

	return (IF_Gbps(10));
}

/*
 * \brief ifmedia status handler: report link state, media subtype and
 * duplex from the firmware-provided link info.
 * @param ifp the interface
 * @param ifmr media request to fill in
 */
static void
lio_ifmedia_status(if_t ifp, struct ifmediareq *ifmr)
{
	struct lio *lio = if_getsoftc(ifp);

	/* Report link down if the driver isn't running. */
	if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	/* Setup the default interface info. */
	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (lio->linfo.link.s.link_up) {
		ifmr->ifm_status |= IFM_ACTIVE;
	} else {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}

	ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev);

	if (lio->linfo.link.s.duplex)
		ifmr->ifm_active |= IFM_FDX;
	else
		ifmr->ifm_active |= IFM_HDX;
}

/*
 * \brief if_get_counter handler: aggregate per-queue DROQ/IQ statistics
 * (and firmware link stats for mcast/collision/error counters).
 * @param ifp the interface
 * @param cnt which counter is requested
 * @returns the counter value, or the stack default for unknown counters
 */
static uint64_t
lio_get_counter(if_t ifp, ift_counter cnt)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	uint64_t counter = 0;
	int	i, q_no;

	switch (cnt) {
	case IFCOUNTER_IPACKETS:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_pkts_received;
		}
		break;
	case IFCOUNTER_OPACKETS:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_done;
		}
		break;
	case IFCOUNTER_IBYTES:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_bytes_received;
		}
		break;
	case IFCOUNTER_OBYTES:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_tot_bytes;
		}
		break;
	case IFCOUNTER_IQDROPS:
		for (i = 0; i < oct->num_oqs; i++) {
			q_no = lio->linfo.rxpciq[i].s.q_no;
			counter += oct->droq[q_no]->stats.rx_dropped;
		}
		break;
	case IFCOUNTER_OQDROPS:
		for (i = 0; i < oct->num_iqs; i++) {
			q_no = lio->linfo.txpciq[i].s.q_no;
			counter += oct->instr_queue[q_no]->stats.tx_dropped;
		}
		break;
	case IFCOUNTER_IMCASTS:
		counter = oct->link_stats.fromwire.total_mcst;
		break;
	case IFCOUNTER_OMCASTS:
		counter = oct->link_stats.fromhost.mcast_pkts_sent;
		break;
	case IFCOUNTER_COLLISIONS:
		counter = oct->link_stats.fromhost.total_collisions;
		break;
	case IFCOUNTER_IERRORS:
		counter = oct->link_stats.fromwire.fcs_err +
			oct->link_stats.fromwire.l2_err +
			oct->link_stats.fromwire.frame_err;
		break;
	default:
		return (if_get_counter_default(ifp, cnt));
	}

	return (counter);
}

/*
 * \brief Configure the ifnet for a lio interface: media, entry points,
 * MTU, checksum-offload assist flags and capabilities.
 * @param lio per-interface softc
 * @returns 0 always
 */
static int
lio_init_ifnet(struct lio *lio)
{
	struct octeon_device *oct = lio->oct_dev;
	if_t ifp = lio->ifp;

	/* ifconfig entrypoint for media type/status reporting */
	ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update,
		     lio_ifmedia_status);

	/* set the default interface values */
	ifmedia_add(&lio->ifmedia,
		    (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)),
		    0, NULL);
	ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL);
	ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO));

	lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media;
	lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media);

	if_initname(ifp, device_get_name(oct->device),
		    device_get_unit(oct->device));
	if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST));
	if_setioctlfn(ifp, lio_ioctl);
	if_setgetcounterfn(ifp, lio_get_counter);
	if_settransmitfn(ifp, lio_mq_start);
	if_setqflushfn(ifp, lio_qflush);
	if_setinitfn(ifp, lio_open);
	if_setmtu(ifp, lio->linfo.link.s.mtu);
	lio->mtu = lio->linfo.link.s.mtu;
	if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO |
			     CSUM_TCP_IPV6 | CSUM_UDP_IPV6));

	if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
				    IFCAP_TSO | IFCAP_LRO |
				    IFCAP_JUMBO_MTU | IFCAP_HWSTATS |
				    IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER |
				    IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING |
				    IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0);

	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_setbaudrate(ifp, lio_get_baudrate(oct));

	return (0);
}

/*
 * \brief Free the software LRO state of every receive queue belonging
 * to this interface.
 * @param octeon_dev octeon device
 * @param ifp the interface whose queues are torn down
 */
static void
lio_tcp_lro_free(struct octeon_device *octeon_dev, if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lio_droq *droq;
	int	q_no;
	int	i;

	for (i = 0; i < octeon_dev->num_oqs; i++) {
		q_no = lio->linfo.rxpciq[i].s.q_no;
		droq = octeon_dev->droq[q_no];
		if (droq->lro.ifp) {
			tcp_lro_free(&droq->lro);
			droq->lro.ifp = NULL;
		}
	}
}

/*
 * \brief Initialize software LRO on every receive queue of this
 * interface; on any failure all previously initialized queues are freed.
 * @param octeon_dev octeon device
 * @param ifp the interface whose queues are set up
 * @returns 0 on success, the tcp_lro_init() error otherwise
 */
static int
lio_tcp_lro_init(struct octeon_device *octeon_dev, if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct lio_droq *droq;
	struct lro_ctrl *lro;
	int	i, q_no, ret = 0;

	for (i = 0; i < octeon_dev->num_oqs; i++) {
		q_no = lio->linfo.rxpciq[i].s.q_no;
		droq = octeon_dev->droq[q_no];
		lro = &droq->lro;
		ret = tcp_lro_init(lro);
		if (ret) {
			lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n",
				    ret);
			goto lro_init_failed;
		}

		lro->ifp = ifp;
	}

	return (ret);

lro_init_failed:
	lio_tcp_lro_free(octeon_dev, ifp);

	return (ret);
}

/*
 * \brief Create one network device per firmware-reported port by
 * sending an IF_CFG soft command for each interface and building the
 * ifnet from the response. (Continues beyond this excerpt.)
 * @param octeon_dev octeon device
 */
static int
lio_setup_nic_devices(struct octeon_device *octeon_dev)
{
	union octeon_if_cfg if_cfg;
	struct lio *lio = NULL;
	if_t	ifp = NULL;
	struct lio_version *vdata;
	struct lio_soft_command *sc;
	struct lio_if_cfg_context *ctx;
	struct lio_if_cfg_resp *resp;
	struct lio_if_props *props;
	int	num_iqueues, num_oqueues, retval;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	uint32_t ctx_size, data_size;
	uint32_t ifidx_or_pfnum, resp_size;
	uint8_t	mac[ETHER_HDR_LEN], i, j;

	/* This is to handle link status changes */
	lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC,
				 LIO_OPCODE_NIC_INFO,
				 lio_link_info, octeon_dev);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct
		    lio_if_cfg_resp);
		ctx_size = sizeof(struct lio_if_cfg_context);
		data_size = sizeof(struct lio_version);
		sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size,
					    ctx_size);
		if (sc == NULL)
			return (ENOMEM);

		resp = (struct lio_if_cfg_resp *)sc->virtrptr;
		ctx = (struct lio_if_cfg_context *)sc->ctxptr;
		vdata = (struct lio_version *)sc->virtdptr;

		/* Tell the firmware which base driver version is asking. */
		*((uint64_t *)vdata) = 0;
		vdata->major = htobe16(LIO_BASE_MAJOR_VERSION);
		vdata->minor = htobe16(LIO_BASE_MINOR_VERSION);
		vdata->micro = htobe16(LIO_BASE_MICRO_VERSION);

		num_iqueues = octeon_dev->sriov_info.num_pf_rings;
		num_oqueues = octeon_dev->sriov_info.num_pf_rings;
		base_queue = octeon_dev->sriov_info.pf_srn;

		gmx_port_id = octeon_dev->pf_num;
		ifidx_or_pfnum = octeon_dev->pf_num;

		lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n",
			    ifidx_or_pfnum, num_iqueues, num_oqueues);
		/* cond is set to 1 by lio_if_cfg_callback() on completion. */
		ctx->cond = 0;
		ctx->octeon_id = lio_get_device_id(octeon_dev);

		if_cfg.if_cfg64 = 0;
		if_cfg.s.num_iqueues = num_iqueues;
		if_cfg.s.num_oqueues = num_oqueues;
		if_cfg.s.base_queue = base_queue;
		if_cfg.s.gmx_port_id = gmx_port_id;

		sc->iq_no = 0;

		lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC,
					 LIO_OPCODE_NIC_IF_CFG, 0,
					 if_cfg.if_cfg64, 0);

		sc->callback = lio_if_cfg_callback;
		sc->callback_arg = sc;
		sc->wait_time = 3000;

		retval = lio_send_soft_command(octeon_dev, sc);
		if (retval == LIO_IQ_SEND_FAILED) {
			lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n",
				    retval);
			/* Soft instr is freed by driver in case of failure. */
			goto setup_nic_dev_fail;
		}

		/*
		 * Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		lio_sleep_cond(octeon_dev, &ctx->cond);

		retval = resp->status;
		if (retval) {
			lio_dev_err(octeon_dev, "iq/oq config failed\n");
			goto setup_nic_dev_fail;
		}

		/* The response arrives big-endian; convert in place. */
		lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
				 (sizeof(struct octeon_if_cfg_info)) >> 3);

		num_iqueues = bitcount64(resp->cfg_info.iqmask);
		num_oqueues = bitcount64(resp->cfg_info.oqmask);

		if (!(num_iqueues) || !(num_oqueues)) {
			lio_dev_err(octeon_dev,
				    "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n",
				    LIO_CAST64(resp->cfg_info.iqmask),
				    LIO_CAST64(resp->cfg_info.oqmask));
			goto setup_nic_dev_fail;
		}

		lio_dev_dbg(octeon_dev,
			    "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
			    i, LIO_CAST64(resp->cfg_info.iqmask),
			    LIO_CAST64(resp->cfg_info.oqmask),
			    num_iqueues, num_oqueues);

		ifp = if_alloc(IFT_ETHER);

		if (ifp == NULL) {
			lio_dev_err(octeon_dev, "Device allocation failed\n");
			goto setup_nic_dev_fail;
		}

		lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);

		/*
		 * NOTE(review): on this and the later failure paths in this
		 * iteration the freshly allocated ifp/lio are not released --
		 * the fail path below only destroys fully registered
		 * interfaces.  Verify whether this leak is acceptable.
		 */
		if (lio == NULL) {
			lio_dev_err(octeon_dev, "Lio allocation failed\n");
			goto setup_nic_dev_fail;
		}

		if_setsoftc(ifp, lio);

		if_sethwtsomax(ifp, LIO_MAX_FRAME_SIZE);
		if_sethwtsomaxsegcount(ifp, LIO_MAX_SG);
		if_sethwtsomaxsegsize(ifp, PAGE_SIZE);

		lio->ifidx = ifidx_or_pfnum;

		props = &octeon_dev->props;
		props->gmxport = resp->cfg_info.linfo.gmxport;
		props->ifp = ifp;

		/* Copy the firmware-assigned queue numbers. */
		lio->linfo.num_rxpciq = num_oqueues;
		lio->linfo.num_txpciq = num_iqueues;
		for (j = 0; j < num_oqueues; j++) {
			lio->linfo.rxpciq[j].rxpciq64 =
			    resp->cfg_info.linfo.rxpciq[j].rxpciq64;
		}

		for (j = 0; j < num_iqueues; j++) {
			lio->linfo.txpciq[j].txpciq64 =
			    resp->cfg_info.linfo.txpciq[j].txpciq64;
		}

		lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
		lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
		lio->linfo.link.link_status64 =
		    resp->cfg_info.linfo.link.link_status64;

		/*
		 * Point to the properties for octeon device to which this
		 * interface belongs.
		 */
		lio->oct_dev = octeon_dev;
		lio->ifp = ifp;

		lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i,
			    lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr));
		lio_init_ifnet(lio);
		/* 64-bit swap required on LE machines */
		lio_swap_8B_data(&lio->linfo.hw_addr, 1);
		/* The MAC occupies bytes 2..7 of the swapped hw_addr. */
		for (j = 0; j < 6; j++)
			mac[j] = *((uint8_t *)(
				   ((uint8_t *)&lio->linfo.hw_addr) + 2 + j));

		ether_ifattach(ifp, mac);

		/*
		 * By default all interfaces on a single Octeon uses the same
		 * tx and rx queues
		 */
		lio->txq = lio->linfo.txpciq[0].s.q_no;
		lio->rxq = lio->linfo.rxpciq[0].s.q_no;
		if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq,
					lio->linfo.num_rxpciq)) {
			lio_dev_err(octeon_dev, "I/O queues creation failed\n");
			goto setup_nic_dev_fail;
		}

		lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);

		lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq);
		lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq);

		if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
			lio_dev_err(octeon_dev, "Gather list allocation failed\n");
			goto setup_nic_dev_fail;
		}

		/* Software LRO only when hardware LRO is disabled. */
		if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp))
			goto setup_nic_dev_fail;

		if (lio_hwlro &&
		    (if_getcapenable(ifp) & IFCAP_LRO) &&
		    (if_getcapenable(ifp) & IFCAP_RXCSUM) &&
		    (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6))
			lio_set_feature(ifp, LIO_CMD_LRO_ENABLE,
					LIO_LROIPV4 | LIO_LROIPV6);

		if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER))
			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1);
		else
			lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0);

		if (lio_setup_rx_oom_poll_fn(ifp))
			goto setup_nic_dev_fail;

		lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
			    i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
		lio->link_changes++;

		lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED);

		/*
		 * Sending command to firmware to enable Rx checksum offload
		 * by default at the time of setup of Liquidio driver for
		 * this device
		 */
		lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL,
				       LIO_CMD_RXCSUM_ENABLE);
		lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL,
				LIO_CMD_TXCSUM_ENABLE);

#ifdef RSS
		if (lio_rss) {
			if (lio_send_rss_param(lio))
				goto setup_nic_dev_fail;
		} else
#endif	/* RSS */

			lio_set_feature(ifp, LIO_CMD_SET_FNV,
					LIO_CMD_FNV_ENABLE);

		lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i);

		lio_free_soft_command(octeon_dev, sc);
		lio->vlan_attach =
		    EVENTHANDLER_REGISTER(vlan_config,
					  lio_vlan_rx_add_vid, lio,
					  EVENTHANDLER_PRI_FIRST);
		lio->vlan_detach =
		    EVENTHANDLER_REGISTER(vlan_unconfig,
					  lio_vlan_rx_kill_vid, lio,
					  EVENTHANDLER_PRI_FIRST);

		/* Update stats periodically */
		callout_init(&lio->stats_timer, 0);
		lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL;

		lio_add_hw_stats(lio);
	}

	return (0);

setup_nic_dev_fail:

	lio_free_soft_command(octeon_dev, sc);

	/* Tear down the interfaces that were fully registered so far. */
	while (i--) {
		lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i);
		lio_destroy_nic_device(octeon_dev, i);
	}

	return (ENODEV);
}

/*
 * \brief Dispatch handler for NIC_INFO packets from the firmware; used
 * to propagate link status changes to the host.
 * @param recv_info received packet descriptor
 * @param ptr the octeon device (registered dispatch argument)
 * @returns 0 always; the receive buffers are freed here in every case.
 */
static int
lio_link_info(struct lio_recv_info *recv_info, void *ptr)
{
	struct octeon_device *oct = (struct octeon_device *)ptr;
	struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt;
	union octeon_link_status *ls;
	int gmxport = 0, i;

	lio_dev_dbg(oct, "%s Called\n", __func__);
if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) { 1500 lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 1501 recv_pkt->buffer_size[0], 1502 recv_pkt->rh.r_nic_info.gmxport); 1503 goto nic_info_err; 1504 } 1505 gmxport = recv_pkt->rh.r_nic_info.gmxport; 1506 ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data + 1507 LIO_DROQ_INFO_SIZE); 1508 lio_swap_8B_data((uint64_t *)ls, 1509 (sizeof(union octeon_link_status)) >> 3); 1510 1511 if (oct->props.gmxport == gmxport) 1512 lio_update_link_status(oct->props.ifp, ls); 1513 1514 nic_info_err: 1515 for (i = 0; i < recv_pkt->buffer_count; i++) 1516 lio_recv_buffer_free(recv_pkt->buffer_ptr[i]); 1517 1518 lio_free_recv_info(recv_info); 1519 return (0); 1520 } 1521 1522 void 1523 lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) 1524 { 1525 1526 bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); 1527 bus_dmamap_unload(iq->txtag, finfo->map); 1528 m_freem(finfo->mb); 1529 } 1530 1531 void 1532 lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) 1533 { 1534 struct lio_gather *g; 1535 struct octeon_device *oct; 1536 struct lio *lio; 1537 int iq_no; 1538 1539 g = finfo->g; 1540 iq_no = iq->txpciq.s.q_no; 1541 oct = iq->oct_dev; 1542 lio = if_getsoftc(oct->props.ifp); 1543 1544 mtx_lock(&lio->glist_lock[iq_no]); 1545 STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries); 1546 mtx_unlock(&lio->glist_lock[iq_no]); 1547 1548 bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); 1549 bus_dmamap_unload(iq->txtag, finfo->map); 1550 m_freem(finfo->mb); 1551 } 1552 1553 static void 1554 lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf) 1555 { 1556 struct lio_soft_command *sc = (struct lio_soft_command *)buf; 1557 struct lio_if_cfg_resp *resp; 1558 struct lio_if_cfg_context *ctx; 1559 1560 resp = (struct lio_if_cfg_resp *)sc->virtrptr; 1561 ctx = (struct lio_if_cfg_context *)sc->ctxptr; 1562 1563 
oct = lio_get_device(ctx->octeon_id); 1564 if (resp->status) 1565 lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n", 1566 LIO_CAST64(resp->status), status); 1567 ctx->cond = 1; 1568 1569 snprintf(oct->fw_info.lio_firmware_version, 32, "%s", 1570 resp->cfg_info.lio_firmware_version); 1571 1572 /* 1573 * This barrier is required to be sure that the response has been 1574 * written fully before waking up the handler 1575 */ 1576 wmb(); 1577 } 1578 1579 static int 1580 lio_is_mac_changed(uint8_t *new, uint8_t *old) 1581 { 1582 1583 return ((new[0] != old[0]) || (new[1] != old[1]) || 1584 (new[2] != old[2]) || (new[3] != old[3]) || 1585 (new[4] != old[4]) || (new[5] != old[5])); 1586 } 1587 1588 void 1589 lio_open(void *arg) 1590 { 1591 struct lio *lio = arg; 1592 if_t ifp = lio->ifp; 1593 struct octeon_device *oct = lio->oct_dev; 1594 uint8_t *mac_new, mac_old[ETHER_HDR_LEN]; 1595 int ret = 0; 1596 1597 lio_ifstate_set(lio, LIO_IFSTATE_RUNNING); 1598 1599 /* Ready for link status updates */ 1600 lio->intf_open = 1; 1601 1602 lio_dev_info(oct, "Interface Open, ready for traffic\n"); 1603 1604 /* tell Octeon to start forwarding packets to host */ 1605 lio_send_rx_ctrl_cmd(lio, 1); 1606 1607 mac_new = if_getlladdr(ifp); 1608 memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN); 1609 1610 if (lio_is_mac_changed(mac_new, mac_old)) { 1611 ret = lio_set_mac(ifp, mac_new); 1612 if (ret) 1613 lio_dev_err(oct, "MAC change failed, error: %d\n", ret); 1614 } 1615 1616 /* Now inform the stack we're ready */ 1617 if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); 1618 1619 lio_dev_info(oct, "Interface is opened\n"); 1620 } 1621 1622 static int 1623 lio_set_rxcsum_command(if_t ifp, int command, uint8_t rx_cmd) 1624 { 1625 struct lio_ctrl_pkt nctrl; 1626 struct lio *lio = if_getsoftc(ifp); 1627 struct octeon_device *oct = lio->oct_dev; 1628 int ret = 0; 1629 1630 nctrl.ncmd.cmd64 = 0; 1631 nctrl.ncmd.s.cmd = command; 1632 nctrl.ncmd.s.param1 = rx_cmd; 
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			    ret);
	}

	return (ret);
}

/*
 * \brief Stop all network interfaces of @oct: mark the command/response
 * path offline, detach the DROQ handlers and destroy every NIC device.
 * @returns 0 on success, 1 when Octeon init had never completed.
 */
static int
lio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	lio_dev_dbg(oct, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		lio_dev_err(oct, "Init for Octeon was not completed\n");
		return (1);
	}

	mtx_lock(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = LIO_DRV_OFFLINE;
	mtx_unlock(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = if_getsoftc(oct->props.ifp);
		for (j = 0; j < oct->num_oqs; j++)
			lio_unregister_droq_ops(oct,
						lio->linfo.rxpciq[j].s.q_no);
	}

	/*
	 * NOTE(review): only the softc fetched in the last loop iteration
	 * is drained here.  oct->props stores a single ifp, so this covers
	 * everything today -- verify if that ever changes.
	 */
	callout_drain(&lio->stats_timer);

	for (i = 0; i < oct->ifcount; i++)
		lio_destroy_nic_device(oct, i);

	lio_dev_dbg(oct, "Network interface stopped\n");

	return (0);
}

/*
 * \brief Free all gather-list state of @lio: per-queue locks,
 * free-lists, the DMA backing memory and the bookkeeping arrays.
 * Safe to call on a partially constructed softc (NULL checks
 * throughout).
 */
static void
lio_delete_glists(struct octeon_device *oct, struct lio *lio)
{
	struct lio_gather *g;
	int i;

	if (lio->glist_lock != NULL) {
		free((void *)lio->glist_lock, M_DEVBUF);
		lio->glist_lock = NULL;
	}

	if (lio->ghead == NULL)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		/* Drain the free list; each entry is a malloc'ed wrapper. */
		do {
			g = (struct lio_gather *)
			    lio_delete_first_node(&lio->ghead[i]);
			free(g, M_DEVBUF);
		} while (g);

		if ((lio->glists_virt_base != NULL) &&
		    (lio->glists_virt_base[i] != NULL)) {
			lio_dma_free(lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i]);
		}
	}

	free(lio->glists_virt_base, M_DEVBUF);
	lio->glists_virt_base = NULL;

	free(lio->glists_dma_base, M_DEVBUF);
	lio->glists_dma_base = NULL;

	free(lio->ghead, M_DEVBUF);
	lio->ghead = NULL;
}

/*
 * \brief Allocate the scatter/gather list pools for @num_iqs transmit
 * queues: one lock, one free list and one slab of DMA-consistent
 * memory per queue, carved into tx_qsize fixed-size entries.
 * @returns 0 on success, 1 on allocation failure (everything already
 * allocated is torn down again via lio_delete_glists()).
 */
static int
lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	struct lio_gather *g;
	int i, j;

	lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF,
				 M_NOWAIT | M_ZERO);
	if (lio->glist_lock == NULL)
		return (1);

	lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF,
			    M_NOWAIT | M_ZERO);
	if (lio->ghead == NULL) {
		free((void *)lio->glist_lock, M_DEVBUF);
		lio->glist_lock = NULL;
		return (1);
	}

	/* Each entry holds LIO_MAX_SG entries, rounded for alignment. */
	lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) *
					 LIO_SG_ENTRY_SIZE);
	/*
	 * allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF,
				       M_NOWAIT | M_ZERO);
	lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF,
				      M_NOWAIT | M_ZERO);
	if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) {
		lio_delete_glists(oct, lio);
		return (1);
	}

	for (i = 0; i < num_iqs; i++) {
		mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF);

		STAILQ_INIT(&lio->ghead[i]);

		lio->glists_virt_base[i] =
		    lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize,
				  (vm_paddr_t *)&lio->glists_dma_base[i]);
		if (lio->glists_virt_base[i] == NULL) {
			lio_delete_glists(oct, lio);
			return (1);
		}

		/* Carve the slab into tx_qsize fixed-size SG entries. */
		for (j = 0; j < lio->tx_qsize; j++) {
			g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO);
			if (g == NULL)
				break;

			g->sg = (struct lio_sg_entry *)(uintptr_t)
			    ((uint64_t)(uintptr_t)lio->glists_virt_base[i] +
			     (j * lio->glist_entry_size));
			g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] +
			    (j * lio->glist_entry_size);
			STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries);
		}

		/* An early break above means a wrapper allocation failed. */
		if (j != lio->tx_qsize) {
			lio_delete_glists(oct, lio);
			return (1);
		}
	}

	return (0);
}

/*
 * \brief Bring the interface down: stop link updates, tell the
 * firmware to stop forwarding RX traffic and clear IFF_DRV_RUNNING.
 */
void
lio_stop(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;

	lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING);
	if_link_state_change(ifp, LINK_STATE_DOWN);

	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	lio_send_rx_ctrl_cmd(lio, 0);

	/* Tell the stack that the interface is no longer active */
	if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);

	lio_dev_info(oct, "Interface is stopped\n");
}

/*
 * \brief Refill output queues that are running low on RX descriptors
 * (hardware credit count at or below 0x40) and post the refilled
 * count back to the credit register.
 */
static void
lio_check_rx_oom_status(struct lio *lio)
{
	struct lio_droq *droq;
	struct octeon_device *oct = lio->oct_dev;
	int desc_refilled;
	int q, q_no = 0;

	for (q = 0; q < oct->num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		droq = oct->droq[q_no];
		if (droq == NULL)
			continue;
		if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) {
			mtx_lock(&droq->lock);
			desc_refilled = lio_droq_refill(oct, droq);
			/*
			 * Flush the droq descriptor data to memory to be sure
			 * that when we update the credits the data in memory
			 * is accurate.
			 */
			wmb();
			lio_write_csr32(oct, droq->pkts_credit_reg,
					desc_refilled);
			/* make sure mmio write completes */
			__compiler_membar();
			mtx_unlock(&droq->lock);
		}
	}
}

/*
 * \brief Periodic taskqueue handler: refill RX descriptors if needed
 * while the interface is running, then re-arm itself in 50 ms.
 */
static void
lio_poll_check_rx_oom_status(void *arg, int pending __unused)
{
	struct lio_tq *rx_status_tq = arg;
	struct lio *lio = rx_status_tq->ctxptr;

	if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING))
		lio_check_rx_oom_status(lio);

	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
				  lio_ms_to_ticks(50));
}

/*
 * \brief Create and start the RX out-of-memory polling taskqueue for
 * @ifp.
 * @returns 0 on success, -1 if the taskqueue could not be created.
 */
static int
lio_setup_rx_oom_poll_fn(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	struct lio_tq *rx_status_tq;

	rx_status_tq = &lio->rx_status_tq;

	rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
					    taskqueue_thread_enqueue,
					    &rx_status_tq->tq);
	if (rx_status_tq->tq == NULL) {
		lio_dev_err(oct, "unable to create lio rx oom status tq\n");
		return (-1);
	}

	TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
			  lio_poll_check_rx_oom_status, (void *)rx_status_tq);

	rx_status_tq->ctxptr = lio;

	taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET,
				"lio%d_rx_oom_status",
				oct->octeon_id);

	/* Kick off the first poll; the handler re-arms itself. */
	taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work,
				  lio_ms_to_ticks(50));

	return (0);
}

/*
 * \brief Cancel and free the RX out-of-memory polling taskqueue.
 */
static void
lio_cleanup_rx_oom_poll_fn(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);

	if (lio->rx_status_tq.tq != NULL) {
		/* The task re-arms itself, so loop until it stays idle. */
		while (taskqueue_cancel_timeout(lio->rx_status_tq.tq,
						&lio->rx_status_tq.work, NULL))
			taskqueue_drain_timeout(lio->rx_status_tq.tq,
						&lio->rx_status_tq.work);

		taskqueue_free(lio->rx_status_tq.tq);

		lio->rx_status_tq.tq = NULL;
	}
}

/*
 * \brief Tear down one NIC interface: stop traffic, wait for pending
 * firmware/queue work, detach from the stack and release all
 * per-interface resources.
 */
static void
lio_destroy_nic_device(struct
    octeon_device *oct, int ifidx)
{
	if_t ifp = oct->props.ifp;
	struct lio *lio;

	if (ifp == NULL) {
		lio_dev_err(oct, "%s No ifp ptr for index %d\n",
			    __func__, ifidx);
		return;
	}

	lio = if_getsoftc(ifp);

	lio_ifstate_set(lio, LIO_IFSTATE_DETACH);

	lio_dev_dbg(oct, "NIC device cleanup\n");

	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		lio_stop(ifp);

	/* Quiesce all in-flight work before detaching from the stack. */
	if (lio_wait_for_pending_requests(oct))
		lio_dev_err(oct, "There were pending requests\n");

	if (lio_wait_for_instr_fetch(oct))
		lio_dev_err(oct, "IQ had pending instructions\n");

	if (lio_wait_for_oq_pkts(oct))
		lio_dev_err(oct, "OQ had pending packets\n");

	if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		ether_ifdetach(ifp);

	lio_tcp_lro_free(oct, ifp);

	lio_cleanup_rx_oom_poll_fn(ifp);

	lio_delete_glists(oct, lio);

	EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach);

	free(lio, M_DEVBUF);

	if_free(ifp);

	oct->props.gmxport = -1;

	oct->props.ifp = NULL;
}

/*
 * \brief Log the current link speed/duplex, or "Link Down", for a
 * registered, non-resetting interface.
 */
static void
print_link_info(if_t ifp)
{
	struct lio *lio = if_getsoftc(ifp);

	if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct octeon_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n",
				     linfo->link.s.speed,
				     (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			lio_dev_info(lio->oct_dev, "Link Down\n");
		}
	}
}

/*
 * \brief Apply a firmware link status update to the interface and
 * notify the stack only when the 64-bit status word actually changed.
 */
static inline void
lio_update_link_status(if_t ifp, union octeon_link_status *ls)
{
	struct lio *lio = if_getsoftc(ifp);
	int changed = (lio->linfo.link.link_status64 != ls->link_status64);

	lio->linfo.link.link_status64 = ls->link_status64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(ifp);
		lio->link_changes++;
		if (lio->linfo.link.s.link_up)
			if_link_state_change(ifp, LINK_STATE_UP);
		else
			if_link_state_change(ifp, LINK_STATE_DOWN);
	}
}

/*
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void
lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf)
{
	struct lio_soft_command *sc = (struct lio_soft_command *)buf;
	struct lio_rx_ctl_context *ctx;

	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n",
			    LIO_CAST64(status));
	ctx->cond = 1;

	/*
	 * This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();
}

/*
 * \brief Ask the firmware to start (1) or stop (0) forwarding RX
 * traffic to the host and wait for the command to complete; updates
 * oct->props.rx_on on success.
 */
static void
lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct lio_soft_command *sc;
	struct lio_rx_ctl_context *ctx;
	union octeon_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int ctx_size = sizeof(struct lio_rx_ctl_context);
	int retval;

	/* Nothing to do when the firmware state already matches. */
	if (oct->props.rx_on == start_stop)
		return;

	sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size);
	if (sc == NULL)
		return;

	ncmd = (union octeon_cmd *)sc->virtdptr;
	ctx = (struct lio_rx_ctl_context *)sc->ctxptr;

	/* cond is set to 1 by lio_rx_ctl_callback() on completion. */
	ctx->cond = 0;
	ctx->octeon_id = lio_get_device_id(oct);
	ncmd->cmd64 = 0;
	ncmd->s.cmd = LIO_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0,
				 0, 0);

	sc->callback = lio_rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = lio_send_soft_command(oct, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(oct, "Failed to send RX Control message\n");
	} else {
		/*
		 * Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		lio_sleep_cond(oct, &ctx->cond);
		oct->props.rx_on = start_stop;
	}

	lio_free_soft_command(oct, sc);
}

/*
 * \brief vlan_config event handler: program a VLAN filter for @vid
 * into the firmware.
 */
static void
lio_vlan_rx_add_vid(void *arg, if_t ifp, uint16_t vid)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	if (if_getsoftc(ifp) != arg)	/* Not our event */
		return;

	if ((vid == 0) || (vid > 4095))	/* Invalid */
		return;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n",
			    ret);
	}
}

/*
 * \brief vlan_unconfig event handler: remove the VLAN filter for @vid
 * from the firmware.
 */
static void
lio_vlan_rx_kill_vid(void *arg, if_t ifp, uint16_t vid)
{
	struct lio_ctrl_pkt nctrl;
	struct lio *lio = if_getsoftc(ifp);
	struct octeon_device *oct = lio->oct_dev;
	int ret = 0;

	if (if_getsoftc(ifp) != arg)	/* Not our event */
		return;

	if ((vid == 0) || (vid > 4095))	/* Invalid */
		return;

	bzero(&nctrl, sizeof(struct lio_ctrl_pkt));

	nctrl.ncmd.cmd64 = 0;
	nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.lio = lio;
	nctrl.cb_fn = lio_ctrl_cmd_completion;

	ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		lio_dev_err(oct,
			    "Kill VLAN filter failed in core (ret: 0x%x)\n",
			    ret);
	}
}

/*
 * \brief Wait (up to ~100 iterations) for all output queues to drain,
 * kicking the per-DROQ task for any queue that still holds packets.
 * @returns the last observed per-queue packet count (0 when drained).
 */
static int
lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	int i, pending_pkts, pkt_cnt = 0, retry = 100;

	do {
		pending_pkts = 0;

		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;

			pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]);
			if (pkt_cnt > 0) {
				pending_pkts += pkt_cnt;
				/* Let the DROQ task consume the backlog. */
				taskqueue_enqueue(oct->droq[i]->droq_taskqueue,
						  &oct->droq[i]->droq_task);
			}
		}

		pkt_cnt = 0;
		lio_sleep_timeout(1);
	} while (retry-- && pending_pkts);

	return (pkt_cnt);
}

/*
 * \brief Release all device resources in reverse order of allocation.
 * The switch deliberately falls through from the device's current
 * state down to LIO_DEV_BEGIN_STATE, undoing each setup stage in
 * turn.
 */
static void
lio_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;

	switch (atomic_load_acq_int(&oct->status)) {
	case LIO_DEV_RUNNING:
	case LIO_DEV_CORE_OK:
		/* No more instructions will be forwarded. */
		atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET);

		oct->app_mode = LIO_DRV_INVALID_APP;
		lio_dev_dbg(oct, "Device state is now %s\n",
			    lio_get_state_string(&oct->status));

		lio_sleep_timeout(100);

		/* fallthrough */
	case LIO_DEV_HOST_OK:

		/* fallthrough */
	case LIO_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		lio_remove_consoles(oct);

		/* fallthrough */
	case LIO_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_pending_requests(oct))
			lio_dev_err(oct, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			lio_dev_err(oct, "IQ had pending instructions\n");

		/*
		 * Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			lio_dev_err(oct, "OQ had pending packets\n");

		/* fallthrough */
	case LIO_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			/* One vector per IOQ; the last one is the aux vector. */
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].tag != NULL) {
					bus_teardown_intr(oct->device,
						oct->ioq_vector[i].msix_res,
						oct->ioq_vector[i].tag);
					oct->ioq_vector[i].tag = NULL;
				}
				if (oct->ioq_vector[i].msix_res != NULL) {
					bus_release_resource(oct->device,
						SYS_RES_IRQ,
						oct->ioq_vector[i].vector,
						oct->ioq_vector[i].msix_res);
					oct->ioq_vector[i].msix_res = NULL;
				}
			}
			/* non-iov vector's argument is oct struct */
			if (oct->tag != NULL) {
				bus_teardown_intr(oct->device, oct->msix_res,
						  oct->tag);
				oct->tag = NULL;
			}

			if (oct->msix_res != NULL) {
				bus_release_resource(oct->device, SYS_RES_IRQ,
						     oct->aux_vector,
						     oct->msix_res);
				oct->msix_res = NULL;
			}

			pci_release_msi(oct->device);
		}
		/* fallthrough */
	case LIO_DEV_IN_RESET:
	case LIO_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		lio_mdelay(100);
		for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			lio_delete_droq(oct, i);
		}

		/* fallthrough */
	case LIO_DEV_RESP_LIST_INIT_DONE:
		for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) {
			if (oct->droq[i] != NULL) {
				free(oct->droq[i], M_DEVBUF);
				oct->droq[i] = NULL;
			}
		}
		lio_delete_response_list(oct);

		/* fallthrough */
	case LIO_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;

			lio_delete_instr_queue(oct, i);
		}

		/* fallthrough */
	case LIO_DEV_MSIX_ALLOC_VECTOR_DONE:
		for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) {
			if (oct->instr_queue[i] != NULL) {
				free(oct->instr_queue[i], M_DEVBUF);
				oct->instr_queue[i] = NULL;
			}
		}
		lio_free_ioq_vector(oct);

		/* fallthrough */
	case LIO_DEV_SC_BUFF_POOL_INIT_DONE:
		lio_free_sc_buffer_pool(oct);

		/* fallthrough */
	case LIO_DEV_DISPATCH_INIT_DONE:
		lio_delete_dispatch_list(oct);

		/* fallthrough */
	case LIO_DEV_PCI_MAP_DONE:
		refcount = lio_deregister_device(oct);

		/* No firmware loaded by us: reset via FLR instead. */
		if (fw_type_is_none())
			lio_pci_flr(oct);

		/* Soft-reset only when the last user deregistered. */
		if (!refcount)
			oct->fn_list.soft_reset(oct);

		lio_unmap_pci_barx(oct, 0);
		lio_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case LIO_DEV_PCI_ENABLE_DONE:
		/* Disable the device, releasing the PCI INT */
		pci_disable_busmaster(oct->device);

		/* fallthrough */
	case LIO_DEV_BEGIN_STATE:
		break;
	}			/* end switch (oct->status) */
}