/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
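
/* Example (hypothetical values): loading the driver with a 5 second DDR
 * timeout, all NETIF_MSG debug bits set, and debug output of console 0
 * redirected to syslog:
 *
 *   modprobe liquidio ddr_timeout=5000 debug=0xffff console_bitmask=0x1
 */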

/**
 * \brief Determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000

/* Update localtime to octeon firmware every 60 seconds.
 * This makes the firmware use the same time reference as the host, so that
 * it is easy to correlate firmware-logged events/errors with host events,
 * for debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
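
/* Sketch (assumed usage; the transmit path is not part of this excerpt):
 * the GSO parameters of an skb are packed into this single 64-bit word so
 * they can travel with the Tx command, e.g.:
 *
 *   union tx_info tx;
 *
 *   tx.u64 = 0;
 *   tx.s.gso_size = skb_shinfo(skb)->gso_size;
 *   tx.s.gso_segs = skb_shinfo(skb)->gso_segs;
 */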

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
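
/**
 * \brief Bottom-half handler that drains the device's output queues
 * @param pdev Pointer to Octeon device, cast to unsigned long for tasklet use
 *
 * Processes up to MAX_PACKET_BUDGET packets per DROQ and reschedules itself
 * if any queue still has work left.
 */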
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}
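
/**
 * \brief Wait for pending output-queue packets to be processed
 * @param oct Pointer to Octeon device
 * @returns number of packets still pending after the retries expire
 *
 * Retries roughly 100 times, kicking the DROQ tasklet whenever hardware
 * still reports queued packets.
 */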
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but
	 * only for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
	struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
	struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{	/* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */
	.driver.pm	= &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns number of sub-queues woken up (0 if none were woken)
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q % lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains max MTU of the lio interface.
	 * This work is invoked only when the new max-MTU of the interface is
	 * less than the current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}
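
/**
 * \brief Tears down the mtu status change work
 * @param netdev network device
 */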
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}
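
/**
 * \brief Find the other Octeon PF on the same adapter
 * @param oct Pointer to Octeon device
 * @returns the sibling PF (next octeon id, same bus and slot), or NULL
 */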
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
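
/**
 * \brief Force the link state of all VFs on this device to disabled
 * @param oct Pointer to Octeon device (may be NULL)
 */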
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}
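
/**
 * \brief Watchdog kthread that monitors the NIC's Octeon cores
 * @param param Pointer to Octeon device
 *
 * Polls CN23XX_SLI_SCRATCH2 every couple of seconds for a mask of crashed
 * or stuck cores; on a crash it disables all VF links and, when module
 * unloading is enabled, drops the references that VFs hold on this PF driver.
 */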
static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		/* other_oct may be NULL on a single-PF adapter */
		vfs_mask2 = other_oct ?
		    READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if ((pdev->device == OCTEON_CN66XX) ||
	    (pdev->device == OCTEON_CN68XX))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
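
/**
 * \brief Check whether the fw_type module parameter is "auto"
 * @returns true if firmware selection is automatic, false otherwise
 */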
static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fallthrough */
	case OCT_DEV_HOST_OK:

		/* fallthrough */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fallthrough */
	case OCT_DEV_IO_QUEUES_DONE:
		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* Force all requests waiting to be fetched by OCTEON to
		 * complete.
		 */
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			struct octeon_instr_queue *iq;

			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			iq = oct->instr_queue[i];

			if (atomic_read(&iq->instr_pending)) {
				spin_lock_bh(&iq->lock);
				iq->fill_cnt = 0;
				iq->octeon_read_index = iq->host_write_index;
				iq->stats.instr_processed +=
					atomic_read(&iq->instr_pending);
				lio_process_iq_request_list(oct, iq, 0);
				spin_unlock_bh(&iq->lock);
			}
		}

		lio_process_ordered_list(oct, 1);
		octeon_free_sc_done_list(oct);
		octeon_free_sc_zombie_list(oct);

		/* fallthrough */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
							msix_entries[i].vector,
							NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		/* fallthrough */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fallthrough */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fallthrough */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fallthrough */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fallthrough */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fallthrough */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fallthrough */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fallthrough */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fallthrough */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fallthrough */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	}			/* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command\n");
		return;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		break;

	default:
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
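
/* Worked example for liquidio_ptp_adjfreq() (hypothetical clock rate):
 * with a 600 MHz coprocessor clock and ppb = 100, the computed delta is
 * (100 << 32) / 600000000 ~= 716, i.e. about 716 units of the 32-bit
 * fractional compensation value are added to (or subtracted from)
 * CN6XXX_MIO_PTP_CLOCK_COMP.
 */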

/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Enable PTP ancillary features (none are supported)
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}

/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}

/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}

/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		tasklet_disable(&oct_priv->droq_tasklet);

		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	/* start periodical statistics fetch */
	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
					(LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}

/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	/* Stop any link updates */
	lio->intf_open = 0;

	stop_txqs(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(&oct_priv->droq_tasklet);
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}

/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}

/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	if (nctrl.sc_status) {
		dev_err(&oct->pci_dev->dev,
			"%s: MAC Address change failed. sc return=%x\n",
			__func__, nctrl.sc_status);
		return -EIO;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
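
/**
 * \brief Net device get_stats64
 * @param netdev network device
 * @param lstats link stats structure to fill in
 *
 * Aggregates per-queue Tx/Rx counters and the firmware-reported link error
 * counters into the standard rtnl_link_stats64 layout.
 */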
static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
	lstats->collisions = oct->link_stats.fromhost.total_collisions;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received pkt with crc error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment error */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* receiver fifo overrun */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors + lstats->rx_fifo_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors +
			    lstats->tx_fifo_errors;
}
sc return=%x\n", 2017 __func__, nctrl.sc_status); 2018 return -EIO; 2019 } 2020 2021 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2022 memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); 2023 2024 return 0; 2025 } 2026 2027 static void 2028 liquidio_get_stats64(struct net_device *netdev, 2029 struct rtnl_link_stats64 *lstats) 2030 { 2031 struct lio *lio = GET_LIO(netdev); 2032 struct octeon_device *oct; 2033 u64 pkts = 0, drop = 0, bytes = 0; 2034 struct oct_droq_stats *oq_stats; 2035 struct oct_iq_stats *iq_stats; 2036 int i, iq_no, oq_no; 2037 2038 oct = lio->oct_dev; 2039 2040 if (ifstate_check(lio, LIO_IFSTATE_RESETTING)) 2041 return; 2042 2043 for (i = 0; i < oct->num_iqs; i++) { 2044 iq_no = lio->linfo.txpciq[i].s.q_no; 2045 iq_stats = &oct->instr_queue[iq_no]->stats; 2046 pkts += iq_stats->tx_done; 2047 drop += iq_stats->tx_dropped; 2048 bytes += iq_stats->tx_tot_bytes; 2049 } 2050 2051 lstats->tx_packets = pkts; 2052 lstats->tx_bytes = bytes; 2053 lstats->tx_dropped = drop; 2054 2055 pkts = 0; 2056 drop = 0; 2057 bytes = 0; 2058 2059 for (i = 0; i < oct->num_oqs; i++) { 2060 oq_no = lio->linfo.rxpciq[i].s.q_no; 2061 oq_stats = &oct->droq[oq_no]->stats; 2062 pkts += oq_stats->rx_pkts_received; 2063 drop += (oq_stats->rx_dropped + 2064 oq_stats->dropped_nodispatch + 2065 oq_stats->dropped_toomany + 2066 oq_stats->dropped_nomem); 2067 bytes += oq_stats->rx_bytes_received; 2068 } 2069 2070 lstats->rx_bytes = bytes; 2071 lstats->rx_packets = pkts; 2072 lstats->rx_dropped = drop; 2073 2074 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast; 2075 lstats->collisions = oct->link_stats.fromhost.total_collisions; 2076 2077 /* detailed rx_errors: */ 2078 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err; 2079 /* recved pkt with crc error */ 2080 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err; 2081 /* recv'd frame alignment error */ 2082 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err; 2083 /* recv'r fifo overrun */ 2084 lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err; 2085 2086 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors + 2087 lstats->rx_frame_errors + lstats->rx_fifo_errors; 2088 2089 /* detailed tx_errors */ 2090 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko; 2091 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link; 2092 lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err; 2093 2094 lstats->tx_errors = lstats->tx_aborted_errors + 2095 lstats->tx_carrier_errors + 2096 lstats->tx_fifo_errors; 2097 } 2098 2099 /** 2100 * \brief Handler for SIOCSHWTSTAMP ioctl 2101 * @param netdev network device 2102 * @param ifr interface request 2103 * @param cmd command 2104 */ 2105 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) 2106 { 2107 struct hwtstamp_config conf; 2108 struct lio *lio = GET_LIO(netdev); 2109 2110 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) 2111 return -EFAULT; 2112 2113 if (conf.flags) 2114 return -EINVAL; 2115 2116 switch (conf.tx_type) { 2117 case HWTSTAMP_TX_ON: 2118 case HWTSTAMP_TX_OFF: 2119 break; 2120 default: 2121 return -ERANGE; 2122 } 2123 2124 switch (conf.rx_filter) { 2125 case HWTSTAMP_FILTER_NONE: 2126 break; 2127 case HWTSTAMP_FILTER_ALL: 2128 case HWTSTAMP_FILTER_SOME: 2129 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 2130 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 2131 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 2132 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 2133 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 2134 case 
HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 2135 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 2136 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 2137 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 2138 case HWTSTAMP_FILTER_PTP_V2_EVENT: 2139 case HWTSTAMP_FILTER_PTP_V2_SYNC: 2140 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 2141 case HWTSTAMP_FILTER_NTP_ALL: 2142 conf.rx_filter = HWTSTAMP_FILTER_ALL; 2143 break; 2144 default: 2145 return -ERANGE; 2146 } 2147 2148 if (conf.rx_filter == HWTSTAMP_FILTER_ALL) 2149 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2150 2151 else 2152 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); 2153 2154 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; 2155 } 2156 2157 /** 2158 * \brief ioctl handler 2159 * @param netdev network device 2160 * @param ifr interface request 2161 * @param cmd command 2162 */ 2163 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2164 { 2165 struct lio *lio = GET_LIO(netdev); 2166 2167 switch (cmd) { 2168 case SIOCSHWTSTAMP: 2169 if (lio->oct_dev->ptp_enable) 2170 return hwtstamp_ioctl(netdev, ifr); 2171 /* fall through */ 2172 default: 2173 return -EOPNOTSUPP; 2174 } 2175 } 2176 2177 /** 2178 * \brief handle a Tx timestamp response 2179 * @param status response status 2180 * @param buf pointer to skb 2181 */ 2182 static void handle_timestamp(struct octeon_device *oct, 2183 u32 status, 2184 void *buf) 2185 { 2186 struct octnet_buf_free_info *finfo; 2187 struct octeon_soft_command *sc; 2188 struct oct_timestamp_resp *resp; 2189 struct lio *lio; 2190 struct sk_buff *skb = (struct sk_buff *)buf; 2191 2192 finfo = (struct octnet_buf_free_info *)skb->cb; 2193 lio = finfo->lio; 2194 sc = finfo->sc; 2195 oct = lio->oct_dev; 2196 resp = (struct oct_timestamp_resp *)sc->virtrptr; 2197 2198 if (status != OCTEON_REQUEST_DONE) { 2199 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n", 2200 CVM_CAST64(status)); 2201 resp->timestamp = 0; 2202 } 2203 2204 octeon_swap_8B_data(&resp->timestamp, 1); 2205 2206 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) { 2207 struct skb_shared_hwtstamps ts; 2208 u64 ns = resp->timestamp; 2209 2210 netif_info(lio, tx_done, lio->netdev, 2211 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n", 2212 skb, (unsigned long long)ns); 2213 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust); 2214 skb_tstamp_tx(skb, &ts); 2215 } 2216 2217 octeon_free_soft_command(oct, sc); 2218 tx_buffer_free(skb); 2219 } 2220 2221 /** \brief Send a data packet that will be timestamped 2222 * @param oct octeon device 2223 * @param ndata pointer to network data 2224 * @param finfo pointer to private network data * @param xmit_more non-zero if more packets will follow; the doorbell ring is then deferred 2225 */ 2226 static inline int send_nic_timestamp_pkt(struct octeon_device *oct, 2227 struct octnic_data_pkt *ndata, 2228 struct octnet_buf_free_info *finfo, 2229 int xmit_more) 2230 { 2231 int retval; 2232 struct octeon_soft_command *sc; 2233 struct lio *lio; 2234 int ring_doorbell; 2235 u32 len; 2236 2237 lio = finfo->lio; 2238 2239 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd, 2240 sizeof(struct oct_timestamp_resp)); 2241 finfo->sc = sc; 2242 2243 if (!sc) { 2244 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n"); 2245 return IQ_SEND_FAILED; 2246 } 2247 2248 if (ndata->reqtype == REQTYPE_NORESP_NET) 2249 ndata->reqtype = REQTYPE_RESP_NET; 2250 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG) 2251 ndata->reqtype = REQTYPE_RESP_NET_SG; 2252 2253 sc->callback = handle_timestamp; 2254 sc->callback_arg = finfo->skb; 2255 sc->iq_no = ndata->q_no; 2256 2257 if (OCTEON_CN23XX_PF(oct)) 2258 len = (u32)((struct octeon_instr_ih3 *) 2259 (&sc->cmd.cmd3.ih3))->dlengsz; 2260 else 2261 len = (u32)((struct octeon_instr_ih2 *) 2262 (&sc->cmd.cmd2.ih2))->dlengsz; 2263 2264 ring_doorbell = !xmit_more; 2265 2266 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd, 2267 sc, len, ndata->reqtype); 2268 2269 if (retval == IQ_SEND_FAILED) { 2270 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n", 2271 retval); 2272 octeon_free_soft_command(oct, sc); 2273 } else { 2274 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n"); 2275 } 2276 2277 return retval; 2278 } 2279 2280 /** \brief Transmit network packets to the Octeon interface 2281 * @param skb skbuff struct with the packet to be sent to the device 2282 * @param netdev pointer to network device 2283 * @returns whether the packet was transmitted to the device okay or not 2284 * (NETDEV_TX_OK or NETDEV_TX_BUSY) 2285 */ 2286 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) 2287 { 2288 struct lio *lio; 2289 struct octnet_buf_free_info *finfo; 2290 union octnic_cmd_setup cmdsetup; 2291 struct octnic_data_pkt ndata; 2292 struct octeon_device *oct; 2293 struct oct_iq_stats *stats; 2294 struct octeon_instr_irh *irh; 2295 union tx_info *tx_info; 2296 int status = 0; 2297 int q_idx = 0, iq_no = 0; 2298 int j, xmit_more = 0; 2299 u64 dptr = 0; 2300 u32 tag = 0; 2301 2302 lio = GET_LIO(netdev); 2303 oct = lio->oct_dev; 2304 2305 q_idx = skb_iq(oct, skb); 2306 tag = q_idx; 2307 iq_no = lio->linfo.txpciq[q_idx].s.q_no; 2308 2309 stats = &oct->instr_queue[iq_no]->stats; 2310 2311 /* Check for all conditions in which the current packet cannot be 2312 * transmitted.
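 * A packet is rejected here when the interface is not in the RUNNING
 * ifstate, when the firmware reports the link down, or when the skb is
 * empty; each case takes the lio_xmit_failed path below, which counts
 * the drop and frees the skb.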
2313 */ 2314 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) || 2315 (!lio->linfo.link.s.link_up) || 2316 (skb->len <= 0)) { 2317 netif_info(lio, tx_err, lio->netdev, 2318 "Transmit failed link_status : %d\n", 2319 lio->linfo.link.s.link_up); 2320 goto lio_xmit_failed; 2321 } 2322 2323 /* Use space in skb->cb to store info used to unmap and 2324 * free the buffers. 2325 */ 2326 finfo = (struct octnet_buf_free_info *)skb->cb; 2327 finfo->lio = lio; 2328 finfo->skb = skb; 2329 finfo->sc = NULL; 2330 2331 /* Prepare the attributes for the data to be passed to OSI. */ 2332 memset(&ndata, 0, sizeof(struct octnic_data_pkt)); 2333 2334 ndata.buf = (void *)finfo; 2335 2336 ndata.q_no = iq_no; 2337 2338 if (octnet_iq_is_full(oct, ndata.q_no)) { 2339 /* defer sending if queue is full */ 2340 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", 2341 ndata.q_no); 2342 stats->tx_iq_busy++; 2343 return NETDEV_TX_BUSY; 2344 } 2345 2346 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", 2347 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no); 2348 */ 2349 2350 ndata.datasize = skb->len; 2351 2352 cmdsetup.u64 = 0; 2353 cmdsetup.s.iq_no = iq_no; 2354 2355 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2356 if (skb->encapsulation) { 2357 cmdsetup.s.tnl_csum = 1; 2358 stats->tx_vxlan++; 2359 } else { 2360 cmdsetup.s.transport_csum = 1; 2361 } 2362 } 2363 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { 2364 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2365 cmdsetup.s.timestamp = 1; 2366 } 2367 2368 if (skb_shinfo(skb)->nr_frags == 0) { 2369 cmdsetup.s.u.datasize = skb->len; 2370 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2371 2372 /* Offload checksum calculation for TCP/UDP packets */ 2373 dptr = dma_map_single(&oct->pci_dev->dev, 2374 skb->data, 2375 skb->len, 2376 DMA_TO_DEVICE); 2377 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) { 2378 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", 2379 __func__); 2380 stats->tx_dmamap_fail++; 2381 return NETDEV_TX_BUSY; 2382 } 2383 2384 if (OCTEON_CN23XX_PF(oct)) 2385 ndata.cmd.cmd3.dptr = dptr; 2386 else 2387 ndata.cmd.cmd2.dptr = dptr; 2388 finfo->dptr = dptr; 2389 ndata.reqtype = REQTYPE_NORESP_NET; 2390 2391 } else { 2392 int i, frags; 2393 skb_frag_t *frag; 2394 struct octnic_gather *g; 2395 2396 spin_lock(&lio->glist_lock[q_idx]); 2397 g = (struct octnic_gather *) 2398 lio_list_delete_head(&lio->glist[q_idx]); 2399 spin_unlock(&lio->glist_lock[q_idx]); 2400 2401 if (!g) { 2402 netif_info(lio, tx_err, lio->netdev, 2403 "Transmit scatter gather: glist null!\n"); 2404 goto lio_xmit_failed; 2405 } 2406 2407 cmdsetup.s.gather = 1; 2408 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); 2409 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag); 2410 2411 memset(g->sg, 0, g->sg_size); 2412 2413 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, 2414 skb->data, 2415 (skb->len - skb->data_len), 2416 DMA_TO_DEVICE); 2417 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { 2418 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", 2419 __func__); 2420 stats->tx_dmamap_fail++; 2421 return NETDEV_TX_BUSY; 2422 } 2423 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); 2424 2425 frags = skb_shinfo(skb)->nr_frags; 2426 i = 1; 2427 while (frags--) { 2428 frag = &skb_shinfo(skb)->frags[i - 1]; 2429 2430 g->sg[(i >> 2)].ptr[(i & 3)] = 2431 skb_frag_dma_map(&oct->pci_dev->dev, 2432 frag, 0, skb_frag_size(frag), 2433 DMA_TO_DEVICE); 2434 2435 if 
(dma_mapping_error(&oct->pci_dev->dev, 2436 g->sg[i >> 2].ptr[i & 3])) { 2437 dma_unmap_single(&oct->pci_dev->dev, 2438 g->sg[0].ptr[0], 2439 skb->len - skb->data_len, 2440 DMA_TO_DEVICE); 2441 for (j = 1; j < i; j++) { 2442 frag = &skb_shinfo(skb)->frags[j - 1]; 2443 dma_unmap_page(&oct->pci_dev->dev, 2444 g->sg[j >> 2].ptr[j & 3], 2445 skb_frag_size(frag), 2446 DMA_TO_DEVICE); 2447 } 2448 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", 2449 __func__); 2450 return NETDEV_TX_BUSY; 2451 } 2452 2453 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag), 2454 (i & 3)); 2455 i++; 2456 } 2457 2458 dptr = g->sg_dma_ptr; 2459 2460 if (OCTEON_CN23XX_PF(oct)) 2461 ndata.cmd.cmd3.dptr = dptr; 2462 else 2463 ndata.cmd.cmd2.dptr = dptr; 2464 finfo->dptr = dptr; 2465 finfo->g = g; 2466 2467 ndata.reqtype = REQTYPE_NORESP_NET_SG; 2468 } 2469 2470 if (OCTEON_CN23XX_PF(oct)) { 2471 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh; 2472 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0]; 2473 } else { 2474 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh; 2475 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0]; 2476 } 2477 2478 if (skb_shinfo(skb)->gso_size) { 2479 tx_info->s.gso_size = skb_shinfo(skb)->gso_size; 2480 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; 2481 stats->tx_gso++; 2482 } 2483 2484 /* HW insert VLAN tag */ 2485 if (skb_vlan_tag_present(skb)) { 2486 irh->priority = skb_vlan_tag_get(skb) >> 13; 2487 irh->vlan = skb_vlan_tag_get(skb) & 0xfff; 2488 } 2489 2490 xmit_more = netdev_xmit_more(); 2491 2492 if (unlikely(cmdsetup.s.timestamp)) 2493 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); 2494 else 2495 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); 2496 if (status == IQ_SEND_FAILED) 2497 goto lio_xmit_failed; 2498 2499 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); 2500 2501 if (status == IQ_SEND_STOP) 2502 netif_stop_subqueue(netdev, q_idx); 2503 2504 netif_trans_update(netdev); 2505 2506 if (tx_info->s.gso_segs) 2507 stats->tx_done += tx_info->s.gso_segs; 2508 else 2509 stats->tx_done++; 2510 stats->tx_tot_bytes += ndata.datasize; 2511 2512 return NETDEV_TX_OK; 2513 2514 lio_xmit_failed: 2515 stats->tx_dropped++; 2516 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", 2517 iq_no, stats->tx_dropped); 2518 if (dptr) 2519 dma_unmap_single(&oct->pci_dev->dev, dptr, 2520 ndata.datasize, DMA_TO_DEVICE); 2521 2522 octeon_ring_doorbell_locked(oct, iq_no); 2523 2524 tx_buffer_free(skb); 2525 return NETDEV_TX_OK; 2526 } 2527 2528 /** \brief Network device Tx timeout 2529 * @param netdev pointer to network device 2530 */ 2531 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue) 2532 { 2533 struct lio *lio; 2534 2535 lio = GET_LIO(netdev); 2536 2537 netif_info(lio, tx_err, lio->netdev, 2538 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", 2539 netdev->stats.tx_dropped); 2540 netif_trans_update(netdev); 2541 wake_txqs(netdev); 2542 } 2543 2544 static int liquidio_vlan_rx_add_vid(struct net_device *netdev, 2545 __be16 proto __attribute__((unused)), 2546 u16 vid) 2547 { 2548 struct lio *lio = GET_LIO(netdev); 2549 struct octeon_device *oct = lio->oct_dev; 2550 struct octnic_ctrl_pkt nctrl; 2551 int ret = 0; 2552 2553 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2554 2555 nctrl.ncmd.u64 = 0; 2556 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2557 nctrl.ncmd.s.param1 = vid; 2558 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2559 nctrl.netpndev = (u64)netdev; 2560 
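	/* As with every control command in this file, the packet goes out
	 * on the interface's first Tx PCI queue (txpciq[0]); the completion
	 * callback set below runs asynchronously once the firmware
	 * acknowledges the command.
	 */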
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2561 2562 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2563 if (ret) { 2564 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n", 2565 ret); 2566 if (ret > 0) 2567 ret = -EIO; 2568 } 2569 2570 return ret; 2571 } 2572 2573 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev, 2574 __be16 proto __attribute__((unused)), 2575 u16 vid) 2576 { 2577 struct lio *lio = GET_LIO(netdev); 2578 struct octeon_device *oct = lio->oct_dev; 2579 struct octnic_ctrl_pkt nctrl; 2580 int ret = 0; 2581 2582 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2583 2584 nctrl.ncmd.u64 = 0; 2585 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2586 nctrl.ncmd.s.param1 = vid; 2587 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2588 nctrl.netpndev = (u64)netdev; 2589 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2590 2591 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2592 if (ret) { 2593 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n", 2594 ret); 2595 if (ret > 0) 2596 ret = -EIO; 2597 } 2598 return ret; 2599 } 2600 2601 /** Sending command to enable/disable RX checksum offload 2602 * @param netdev pointer to network device 2603 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL 2604 * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/ 2605 * OCTNET_CMD_RXCSUM_DISABLE 2606 * @returns SUCCESS or FAILURE 2607 */ 2608 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command, 2609 u8 rx_cmd) 2610 { 2611 struct lio *lio = GET_LIO(netdev); 2612 struct octeon_device *oct = lio->oct_dev; 2613 struct octnic_ctrl_pkt nctrl; 2614 int ret = 0; 2615 2616 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2617 2618 nctrl.ncmd.u64 = 0; 2619 nctrl.ncmd.s.cmd = command; 2620 nctrl.ncmd.s.param1 = rx_cmd; 2621 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2622 nctrl.netpndev = (u64)netdev; 2623 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2624 2625 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2626 if (ret) { 2627 dev_err(&oct->pci_dev->dev, 2628 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", 2629 ret); 2630 if (ret > 0) 2631 ret = -EIO; 2632 } 2633 return ret; 2634 } 2635 2636 /** Sending command to add/delete VxLAN UDP port to firmware 2637 * @param netdev pointer to network device 2638 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG 2639 * @param vxlan_port VxLAN port to be added or deleted 2640 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD, 2641 * OCTNET_CMD_VXLAN_PORT_DEL 2642 * @returns SUCCESS or FAILURE 2643 */ 2644 static int liquidio_vxlan_port_command(struct net_device *netdev, int command, 2645 u16 vxlan_port, u8 vxlan_cmd_bit) 2646 { 2647 struct lio *lio = GET_LIO(netdev); 2648 struct octeon_device *oct = lio->oct_dev; 2649 struct octnic_ctrl_pkt nctrl; 2650 int ret = 0; 2651 2652 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2653 2654 nctrl.ncmd.u64 = 0; 2655 nctrl.ncmd.s.cmd = command; 2656 nctrl.ncmd.s.more = vxlan_cmd_bit; 2657 nctrl.ncmd.s.param1 = vxlan_port; 2658 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2659 nctrl.netpndev = (u64)netdev; 2660 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2661 2662 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl); 2663 if (ret) { 2664 dev_err(&oct->pci_dev->dev, 2665 "VxLAN port add/delete failed in core (ret:0x%x)\n", 2666 ret); 2667 if (ret > 0) 2668 ret = -EIO; 2669 } 2670 return ret; 2671 } 2672 2673 static int liquidio_udp_tunnel_set_port(struct net_device *netdev, 2674 unsigned int table, unsigned int entry, 2675 struct 
udp_tunnel_info *ti) 2676 { 2677 return liquidio_vxlan_port_command(netdev, 2678 OCTNET_CMD_VXLAN_PORT_CONFIG, 2679 htons(ti->port), 2680 OCTNET_CMD_VXLAN_PORT_ADD); 2681 } 2682 2683 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev, 2684 unsigned int table, 2685 unsigned int entry, 2686 struct udp_tunnel_info *ti) 2687 { 2688 return liquidio_vxlan_port_command(netdev, 2689 OCTNET_CMD_VXLAN_PORT_CONFIG, 2690 htons(ti->port), 2691 OCTNET_CMD_VXLAN_PORT_DEL); 2692 } 2693 2694 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = { 2695 .set_port = liquidio_udp_tunnel_set_port, 2696 .unset_port = liquidio_udp_tunnel_unset_port, 2697 .tables = { 2698 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, }, 2699 }, 2700 }; 2701 2702 /** \brief Net device fix features 2703 * @param netdev pointer to network device 2704 * @param request features requested 2705 * @returns updated features list 2706 */ 2707 static netdev_features_t liquidio_fix_features(struct net_device *netdev, 2708 netdev_features_t request) 2709 { 2710 struct lio *lio = netdev_priv(netdev); 2711 2712 if ((request & NETIF_F_RXCSUM) && 2713 !(lio->dev_capability & NETIF_F_RXCSUM)) 2714 request &= ~NETIF_F_RXCSUM; 2715 2716 if ((request & NETIF_F_HW_CSUM) && 2717 !(lio->dev_capability & NETIF_F_HW_CSUM)) 2718 request &= ~NETIF_F_HW_CSUM; 2719 2720 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO)) 2721 request &= ~NETIF_F_TSO; 2722 2723 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6)) 2724 request &= ~NETIF_F_TSO6; 2725 2726 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO)) 2727 request &= ~NETIF_F_LRO; 2728 2729 /*Disable LRO if RXCSUM is off */ 2730 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) && 2731 (lio->dev_capability & NETIF_F_LRO)) 2732 request &= ~NETIF_F_LRO; 2733 2734 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) && 2735 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER)) 2736 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER; 2737 2738 return request; 2739 } 2740 2741 /** \brief Net device set features 2742 * @param netdev pointer to network device 2743 * @param features features to enable/disable 2744 */ 2745 static int liquidio_set_features(struct net_device *netdev, 2746 netdev_features_t features) 2747 { 2748 struct lio *lio = netdev_priv(netdev); 2749 2750 if ((features & NETIF_F_LRO) && 2751 (lio->dev_capability & NETIF_F_LRO) && 2752 !(netdev->features & NETIF_F_LRO)) 2753 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 2754 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2755 else if (!(features & NETIF_F_LRO) && 2756 (lio->dev_capability & NETIF_F_LRO) && 2757 (netdev->features & NETIF_F_LRO)) 2758 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE, 2759 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 2760 2761 /* Sending command to firmware to enable/disable RX checksum 2762 * offload settings using ethtool 2763 */ 2764 if (!(netdev->features & NETIF_F_RXCSUM) && 2765 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2766 (features & NETIF_F_RXCSUM)) 2767 liquidio_set_rxcsum_command(netdev, 2768 OCTNET_CMD_TNL_RX_CSUM_CTL, 2769 OCTNET_CMD_RXCSUM_ENABLE); 2770 else if ((netdev->features & NETIF_F_RXCSUM) && 2771 (lio->enc_dev_capability & NETIF_F_RXCSUM) && 2772 !(features & NETIF_F_RXCSUM)) 2773 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 2774 OCTNET_CMD_RXCSUM_DISABLE); 2775 2776 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2777 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2778 !(netdev->features & 
NETIF_F_HW_VLAN_CTAG_FILTER)) 2779 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2780 OCTNET_CMD_VLAN_FILTER_ENABLE); 2781 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && 2782 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) && 2783 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) 2784 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 2785 OCTNET_CMD_VLAN_FILTER_DISABLE); 2786 2787 return 0; 2788 } 2789 2790 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx, 2791 u8 *mac, bool is_admin_assigned) 2792 { 2793 struct lio *lio = GET_LIO(netdev); 2794 struct octeon_device *oct = lio->oct_dev; 2795 struct octnic_ctrl_pkt nctrl; 2796 int ret = 0; 2797 2798 if (!is_valid_ether_addr(mac)) 2799 return -EINVAL; 2800 2801 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs) 2802 return -EINVAL; 2803 2804 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2805 2806 nctrl.ncmd.u64 = 0; 2807 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; 2808 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2809 nctrl.ncmd.s.param1 = vfidx + 1; 2810 nctrl.ncmd.s.more = 1; 2811 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2812 nctrl.netpndev = (u64)netdev; 2813 if (is_admin_assigned) { 2814 nctrl.ncmd.s.param2 = true; 2815 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; 2816 } 2817 2818 nctrl.udd[0] = 0; 2819 /* The MAC Address is presented in network byte order. */ 2820 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac); 2821 2822 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0]; 2823 2824 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2825 if (ret > 0) 2826 ret = -EIO; 2827 2828 return ret; 2829 } 2830 2831 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac) 2832 { 2833 struct lio *lio = GET_LIO(netdev); 2834 struct octeon_device *oct = lio->oct_dev; 2835 int retval; 2836 2837 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2838 return -EINVAL; 2839 2840 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true); 2841 if (!retval) 2842 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac); 2843 2844 return retval; 2845 } 2846 2847 static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx, 2848 bool enable) 2849 { 2850 struct lio *lio = GET_LIO(netdev); 2851 struct octeon_device *oct = lio->oct_dev; 2852 struct octnic_ctrl_pkt nctrl; 2853 int retval; 2854 2855 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) { 2856 netif_info(lio, drv, lio->netdev, 2857 "firmware does not support spoofchk\n"); 2858 return -EOPNOTSUPP; 2859 } 2860 2861 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 2862 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 2863 return -EINVAL; 2864 } 2865 2866 if (enable) { 2867 if (oct->sriov_info.vf_spoofchk[vfidx]) 2868 return 0; 2869 } else { 2870 /* Clear */ 2871 if (!oct->sriov_info.vf_spoofchk[vfidx]) 2872 return 0; 2873 } 2874 2875 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2876 nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1; 2877 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK; 2878 nctrl.ncmd.s.param1 = 2879 vfidx + 1; /* vfidx is 0 based, 2880 * but vf_num (param1) is 1 based 2881 */ 2882 nctrl.ncmd.s.param2 = enable; 2883 nctrl.ncmd.s.more = 0; 2884 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2885 nctrl.cb_fn = NULL; 2886 2887 retval = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2888 2889 if (retval) { 2890 netif_info(lio, drv, lio->netdev, 2891 "Failed to set VF %d spoofchk %s\n", vfidx, 2892 enable ? 
"on" : "off"); 2893 return -1; 2894 } 2895 2896 oct->sriov_info.vf_spoofchk[vfidx] = enable; 2897 netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx, 2898 enable ? "on" : "off"); 2899 2900 return 0; 2901 } 2902 2903 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx, 2904 u16 vlan, u8 qos, __be16 vlan_proto) 2905 { 2906 struct lio *lio = GET_LIO(netdev); 2907 struct octeon_device *oct = lio->oct_dev; 2908 struct octnic_ctrl_pkt nctrl; 2909 u16 vlantci; 2910 int ret = 0; 2911 2912 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2913 return -EINVAL; 2914 2915 if (vlan_proto != htons(ETH_P_8021Q)) 2916 return -EPROTONOSUPPORT; 2917 2918 if (vlan >= VLAN_N_VID || qos > 7) 2919 return -EINVAL; 2920 2921 if (vlan) 2922 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT; 2923 else 2924 vlantci = 0; 2925 2926 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci) 2927 return 0; 2928 2929 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 2930 2931 if (vlan) 2932 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER; 2933 else 2934 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER; 2935 2936 nctrl.ncmd.s.param1 = vlantci; 2937 nctrl.ncmd.s.param2 = 2938 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */ 2939 nctrl.ncmd.s.more = 0; 2940 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 2941 nctrl.cb_fn = NULL; 2942 2943 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 2944 if (ret) { 2945 if (ret > 0) 2946 ret = -EIO; 2947 return ret; 2948 } 2949 2950 oct->sriov_info.vf_vlantci[vfidx] = vlantci; 2951 2952 return ret; 2953 } 2954 2955 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx, 2956 struct ifla_vf_info *ivi) 2957 { 2958 struct lio *lio = GET_LIO(netdev); 2959 struct octeon_device *oct = lio->oct_dev; 2960 u8 *macaddr; 2961 2962 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 2963 return -EINVAL; 2964 2965 memset(ivi, 0, sizeof(struct ifla_vf_info)); 2966 2967 ivi->vf = vfidx; 2968 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx]; 2969 ether_addr_copy(&ivi->mac[0], macaddr); 2970 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK; 2971 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT; 2972 if (oct->sriov_info.trusted_vf.active && 2973 oct->sriov_info.trusted_vf.id == vfidx) 2974 ivi->trusted = true; 2975 else 2976 ivi->trusted = false; 2977 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx]; 2978 ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx]; 2979 ivi->max_tx_rate = lio->linfo.link.s.speed; 2980 ivi->min_tx_rate = 0; 2981 2982 return 0; 2983 } 2984 2985 static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted) 2986 { 2987 struct octeon_device *oct = lio->oct_dev; 2988 struct octeon_soft_command *sc; 2989 int retval; 2990 2991 sc = octeon_alloc_soft_command(oct, 0, 16, 0); 2992 if (!sc) 2993 return -ENOMEM; 2994 2995 sc->iq_no = lio->linfo.txpciq[0].s.q_no; 2996 2997 /* vfidx is 0 based, but vf_num (param1) is 1 based */ 2998 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, 2999 OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1, 3000 trusted); 3001 3002 init_completion(&sc->complete); 3003 sc->sc_status = OCTEON_REQUEST_PENDING; 3004 3005 retval = octeon_send_soft_command(oct, sc); 3006 if (retval == IQ_SEND_FAILED) { 3007 octeon_free_soft_command(oct, sc); 3008 retval = -1; 3009 } else { 3010 /* Wait for response or timeout */ 3011 retval = wait_for_sc_completion_timeout(oct, sc, 0); 3012 if (retval) 3013 return (retval); 3014 3015 WRITE_ONCE(sc->caller_is_done, true); 3016 } 3017 3018 
return retval; 3019 } 3020 3021 static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx, 3022 bool setting) 3023 { 3024 struct lio *lio = GET_LIO(netdev); 3025 struct octeon_device *oct = lio->oct_dev; 3026 3027 if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) { 3028 /* trusted vf is not supported by firmware older than 1.7.1 */ 3029 return -EOPNOTSUPP; 3030 } 3031 3032 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) { 3033 netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx); 3034 return -EINVAL; 3035 } 3036 3037 if (setting) { 3038 /* Set */ 3039 3040 if (oct->sriov_info.trusted_vf.active && 3041 oct->sriov_info.trusted_vf.id == vfidx) 3042 return 0; 3043 3044 if (oct->sriov_info.trusted_vf.active) { 3045 netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n"); 3046 return -EPERM; 3047 } 3048 } else { 3049 /* Clear */ 3050 3051 if (!oct->sriov_info.trusted_vf.active) 3052 return 0; 3053 } 3054 3055 if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) { 3056 if (setting) { 3057 oct->sriov_info.trusted_vf.id = vfidx; 3058 oct->sriov_info.trusted_vf.active = true; 3059 } else { 3060 oct->sriov_info.trusted_vf.active = false; 3061 } 3062 3063 netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx, 3064 setting ? "" : "not "); 3065 } else { 3066 netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n"); 3067 return -1; 3068 } 3069 3070 return 0; 3071 } 3072 3073 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx, 3074 int linkstate) 3075 { 3076 struct lio *lio = GET_LIO(netdev); 3077 struct octeon_device *oct = lio->oct_dev; 3078 struct octnic_ctrl_pkt nctrl; 3079 int ret = 0; 3080 3081 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3082 return -EINVAL; 3083 3084 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate) 3085 return 0; 3086 3087 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); 3088 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE; 3089 nctrl.ncmd.s.param1 = 3090 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */ 3091 nctrl.ncmd.s.param2 = linkstate; 3092 nctrl.ncmd.s.more = 0; 3093 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; 3094 nctrl.cb_fn = NULL; 3095 3096 ret = octnet_send_nic_ctrl_pkt(oct, &nctrl); 3097 3098 if (!ret) 3099 oct->sriov_info.vf_linkstate[vfidx] = linkstate; 3100 else if (ret > 0) 3101 ret = -EIO; 3102 3103 return ret; 3104 } 3105 3106 static int 3107 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode) 3108 { 3109 struct lio_devlink_priv *priv; 3110 struct octeon_device *oct; 3111 3112 priv = devlink_priv(devlink); 3113 oct = priv->oct; 3114 3115 *mode = oct->eswitch_mode; 3116 3117 return 0; 3118 } 3119 3120 static int 3121 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode, 3122 struct netlink_ext_ack *extack) 3123 { 3124 struct lio_devlink_priv *priv; 3125 struct octeon_device *oct; 3126 int ret = 0; 3127 3128 priv = devlink_priv(devlink); 3129 oct = priv->oct; 3130 3131 if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)) 3132 return -EINVAL; 3133 3134 if (oct->eswitch_mode == mode) 3135 return 0; 3136 3137 switch (mode) { 3138 case DEVLINK_ESWITCH_MODE_SWITCHDEV: 3139 oct->eswitch_mode = mode; 3140 ret = lio_vf_rep_create(oct); 3141 break; 3142 3143 case DEVLINK_ESWITCH_MODE_LEGACY: 3144 lio_vf_rep_destroy(oct); 3145 oct->eswitch_mode = mode; 3146 break; 3147 3148 default: 3149 ret = -EINVAL; 3150 } 3151 3152 return ret; 3153 } 3154 3155 static const struct devlink_ops liquidio_devlink_ops = { 
3156 .eswitch_mode_get = liquidio_eswitch_mode_get, 3157 .eswitch_mode_set = liquidio_eswitch_mode_set, 3158 }; 3159 3160 static int 3161 liquidio_get_port_parent_id(struct net_device *dev, 3162 struct netdev_phys_item_id *ppid) 3163 { 3164 struct lio *lio = GET_LIO(dev); 3165 struct octeon_device *oct = lio->oct_dev; 3166 3167 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV) 3168 return -EOPNOTSUPP; 3169 3170 ppid->id_len = ETH_ALEN; 3171 ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2); 3172 3173 return 0; 3174 } 3175 3176 static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx, 3177 struct ifla_vf_stats *vf_stats) 3178 { 3179 struct lio *lio = GET_LIO(netdev); 3180 struct octeon_device *oct = lio->oct_dev; 3181 struct oct_vf_stats stats; 3182 int ret; 3183 3184 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) 3185 return -EINVAL; 3186 3187 memset(&stats, 0, sizeof(struct oct_vf_stats)); 3188 ret = cn23xx_get_vf_stats(oct, vfidx, &stats); 3189 if (!ret) { 3190 vf_stats->rx_packets = stats.rx_packets; 3191 vf_stats->tx_packets = stats.tx_packets; 3192 vf_stats->rx_bytes = stats.rx_bytes; 3193 vf_stats->tx_bytes = stats.tx_bytes; 3194 vf_stats->broadcast = stats.broadcast; 3195 vf_stats->multicast = stats.multicast; 3196 } 3197 3198 return ret; 3199 } 3200 3201 static const struct net_device_ops lionetdevops = { 3202 .ndo_open = liquidio_open, 3203 .ndo_stop = liquidio_stop, 3204 .ndo_start_xmit = liquidio_xmit, 3205 .ndo_get_stats64 = liquidio_get_stats64, 3206 .ndo_set_mac_address = liquidio_set_mac, 3207 .ndo_set_rx_mode = liquidio_set_mcast_list, 3208 .ndo_tx_timeout = liquidio_tx_timeout, 3209 3210 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, 3211 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, 3212 .ndo_change_mtu = liquidio_change_mtu, 3213 .ndo_do_ioctl = liquidio_ioctl, 3214 .ndo_fix_features = liquidio_fix_features, 3215 .ndo_set_features = liquidio_set_features, 3216 .ndo_udp_tunnel_add = udp_tunnel_nic_add_port, 3217 .ndo_udp_tunnel_del = udp_tunnel_nic_del_port, 3218 .ndo_set_vf_mac = liquidio_set_vf_mac, 3219 .ndo_set_vf_vlan = liquidio_set_vf_vlan, 3220 .ndo_get_vf_config = liquidio_get_vf_config, 3221 .ndo_set_vf_spoofchk = liquidio_set_vf_spoofchk, 3222 .ndo_set_vf_trust = liquidio_set_vf_trust, 3223 .ndo_set_vf_link_state = liquidio_set_vf_link_state, 3224 .ndo_get_vf_stats = liquidio_get_vf_stats, 3225 .ndo_get_port_parent_id = liquidio_get_port_parent_id, 3226 }; 3227 3228 /** \brief Entry point for the liquidio module 3229 */ 3230 static int __init liquidio_init(void) 3231 { 3232 int i; 3233 struct handshake *hs; 3234 3235 init_completion(&first_stage); 3236 3237 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT); 3238 3239 if (liquidio_init_pci()) 3240 return -EINVAL; 3241 3242 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000)); 3243 3244 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3245 hs = &handshake[i]; 3246 if (hs->pci_dev) { 3247 wait_for_completion(&hs->init); 3248 if (!hs->init_ok) { 3249 /* init handshake failed */ 3250 dev_err(&hs->pci_dev->dev, 3251 "Failed to init device\n"); 3252 liquidio_deinit_pci(); 3253 return -EIO; 3254 } 3255 } 3256 } 3257 3258 for (i = 0; i < MAX_OCTEON_DEVICES; i++) { 3259 hs = &handshake[i]; 3260 if (hs->pci_dev) { 3261 wait_for_completion_timeout(&hs->started, 3262 msecs_to_jiffies(30000)); 3263 if (!hs->started_ok) { 3264 /* starter handshake failed */ 3265 dev_err(&hs->pci_dev->dev, 3266 "Firmware failed to start\n"); 3267 liquidio_deinit_pci(); 3268 return -EIO; 
3269 } 3270 } 3271 } 3272 3273 return 0; 3274 } 3275 3276 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf) 3277 { 3278 struct octeon_device *oct = (struct octeon_device *)buf; 3279 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; 3280 int gmxport = 0; 3281 union oct_link_status *ls; 3282 int i; 3283 3284 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) { 3285 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n", 3286 recv_pkt->buffer_size[0], 3287 recv_pkt->rh.r_nic_info.gmxport); 3288 goto nic_info_err; 3289 } 3290 3291 gmxport = recv_pkt->rh.r_nic_info.gmxport; 3292 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) + 3293 OCT_DROQ_INFO_SIZE); 3294 3295 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3); 3296 for (i = 0; i < oct->ifcount; i++) { 3297 if (oct->props[i].gmxport == gmxport) { 3298 update_link_status(oct->props[i].netdev, ls); 3299 break; 3300 } 3301 } 3302 3303 nic_info_err: 3304 for (i = 0; i < recv_pkt->buffer_count; i++) 3305 recv_buffer_free(recv_pkt->buffer_ptr[i]); 3306 octeon_free_recv_info(recv_info); 3307 return 0; 3308 } 3309 3310 /** 3311 * \brief Setup network interfaces 3312 * @param octeon_dev octeon device 3313 * 3314 * Called during init time for each device. It assumes the NIC 3315 * is already up and running. The link information for each 3316 * interface is passed in link_info. 3317 */ 3318 static int setup_nic_devices(struct octeon_device *octeon_dev) 3319 { 3320 struct lio *lio = NULL; 3321 struct net_device *netdev; 3322 u8 mac[6], i, j, *fw_ver, *micro_ver; 3323 unsigned long micro; 3324 u32 cur_ver; 3325 struct octeon_soft_command *sc; 3326 struct liquidio_if_cfg_resp *resp; 3327 struct octdev_props *props; 3328 int retval, num_iqueues, num_oqueues; 3329 int max_num_queues = 0; 3330 union oct_nic_if_cfg if_cfg; 3331 unsigned int base_queue; 3332 unsigned int gmx_port_id; 3333 u32 resp_size, data_size; 3334 u32 ifidx_or_pfnum; 3335 struct lio_version *vdata; 3336 struct devlink *devlink; 3337 struct lio_devlink_priv *lio_devlink; 3338 3339 /* This is to handle link status changes */ 3340 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, 3341 OPCODE_NIC_INFO, 3342 lio_nic_info, octeon_dev); 3343 3344 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. 3345 * They are handled directly. 
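 * Their completion handlers release resources inline instead; see
 * handle_timestamp() above, which frees both the soft command and the
 * skb once a timestamped transmit completes.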
3346 */ 3347 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, 3348 free_netbuf); 3349 3350 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, 3351 free_netsgbuf); 3352 3353 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, 3354 free_netsgbuf_with_resp); 3355 3356 for (i = 0; i < octeon_dev->ifcount; i++) { 3357 resp_size = sizeof(struct liquidio_if_cfg_resp); 3358 data_size = sizeof(struct lio_version); 3359 sc = (struct octeon_soft_command *) 3360 octeon_alloc_soft_command(octeon_dev, data_size, 3361 resp_size, 0); 3362 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; 3363 vdata = (struct lio_version *)sc->virtdptr; 3364 3365 *((u64 *)vdata) = 0; 3366 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION); 3367 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION); 3368 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION); 3369 3370 if (OCTEON_CN23XX_PF(octeon_dev)) { 3371 num_iqueues = octeon_dev->sriov_info.num_pf_rings; 3372 num_oqueues = octeon_dev->sriov_info.num_pf_rings; 3373 base_queue = octeon_dev->sriov_info.pf_srn; 3374 3375 gmx_port_id = octeon_dev->pf_num; 3376 ifidx_or_pfnum = octeon_dev->pf_num; 3377 } else { 3378 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF( 3379 octeon_get_conf(octeon_dev), i); 3380 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF( 3381 octeon_get_conf(octeon_dev), i); 3382 base_queue = CFG_GET_BASE_QUE_NIC_IF( 3383 octeon_get_conf(octeon_dev), i); 3384 gmx_port_id = CFG_GET_GMXID_NIC_IF( 3385 octeon_get_conf(octeon_dev), i); 3386 ifidx_or_pfnum = i; 3387 } 3388 3389 dev_dbg(&octeon_dev->pci_dev->dev, 3390 "requesting config for interface %d, iqs %d, oqs %d\n", 3391 ifidx_or_pfnum, num_iqueues, num_oqueues); 3392 3393 if_cfg.u64 = 0; 3394 if_cfg.s.num_iqueues = num_iqueues; 3395 if_cfg.s.num_oqueues = num_oqueues; 3396 if_cfg.s.base_queue = base_queue; 3397 if_cfg.s.gmx_port_id = gmx_port_id; 3398 3399 sc->iq_no = 0; 3400 3401 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, 3402 OPCODE_NIC_IF_CFG, 0, 3403 if_cfg.u64, 0); 3404 3405 init_completion(&sc->complete); 3406 sc->sc_status = OCTEON_REQUEST_PENDING; 3407 3408 retval = octeon_send_soft_command(octeon_dev, sc); 3409 if (retval == IQ_SEND_FAILED) { 3410 dev_err(&octeon_dev->pci_dev->dev, 3411 "iq/oq config failed status: %x\n", 3412 retval); 3413 /* Soft instr is freed by driver in case of failure. */ 3414 octeon_free_soft_command(octeon_dev, sc); 3415 return(-EIO); 3416 } 3417 3418 /* Sleep on a wait queue till the cond flag indicates that the 3419 * response arrived or timed-out. 3420 */ 3421 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0); 3422 if (retval) 3423 return retval; 3424 3425 retval = resp->status; 3426 if (retval) { 3427 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); 3428 WRITE_ONCE(sc->caller_is_done, true); 3429 goto setup_nic_dev_done; 3430 } 3431 snprintf(octeon_dev->fw_info.liquidio_firmware_version, 3432 32, "%s", 3433 resp->cfg_info.liquidio_firmware_version); 3434 3435 /* Verify f/w version (in case of 'auto' loading from flash) */ 3436 fw_ver = octeon_dev->fw_info.liquidio_firmware_version; 3437 if (memcmp(LIQUIDIO_BASE_VERSION, 3438 fw_ver, 3439 strlen(LIQUIDIO_BASE_VERSION))) { 3440 dev_err(&octeon_dev->pci_dev->dev, 3441 "Unmatched firmware version. 
Expected %s.x, got %s.\n", 3442 LIQUIDIO_BASE_VERSION, fw_ver); 3443 WRITE_ONCE(sc->caller_is_done, true); 3444 goto setup_nic_dev_done; 3445 } else if (atomic_read(octeon_dev->adapter_fw_state) == 3446 FW_IS_PRELOADED) { 3447 dev_info(&octeon_dev->pci_dev->dev, 3448 "Using auto-loaded firmware version %s.\n", 3449 fw_ver); 3450 } 3451 3452 /* extract micro version field; point past '<maj>.<min>.' */ 3453 micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1; 3454 if (kstrtoul(micro_ver, 10, &micro) != 0) 3455 micro = 0; 3456 octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION; 3457 octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION; 3458 octeon_dev->fw_info.ver.rev = micro; 3459 3460 octeon_swap_8B_data((u64 *)(&resp->cfg_info), 3461 (sizeof(struct liquidio_if_cfg_info)) >> 3); 3462 3463 num_iqueues = hweight64(resp->cfg_info.iqmask); 3464 num_oqueues = hweight64(resp->cfg_info.oqmask); 3465 3466 if (!(num_iqueues) || !(num_oqueues)) { 3467 dev_err(&octeon_dev->pci_dev->dev, 3468 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", 3469 resp->cfg_info.iqmask, 3470 resp->cfg_info.oqmask); 3471 WRITE_ONCE(sc->caller_is_done, true); 3472 goto setup_nic_dev_done; 3473 } 3474 3475 if (OCTEON_CN6XXX(octeon_dev)) { 3476 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3477 cn6xxx)); 3478 } else if (OCTEON_CN23XX_PF(octeon_dev)) { 3479 max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev, 3480 cn23xx_pf)); 3481 } 3482 3483 dev_dbg(&octeon_dev->pci_dev->dev, 3484 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n", 3485 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, 3486 num_iqueues, num_oqueues, max_num_queues); 3487 netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues); 3488 3489 if (!netdev) { 3490 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); 3491 WRITE_ONCE(sc->caller_is_done, true); 3492 goto setup_nic_dev_done; 3493 } 3494 3495 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev); 3496 3497 /* Associate the routines that will handle different 3498 * netdev tasks.
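 * (This is the lionetdevops table defined earlier in the file; ethtool
 * handlers are attached separately below via liquidio_set_ethtool_ops().)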
3499 */ 3500 netdev->netdev_ops = &lionetdevops; 3501 3502 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3503 if (retval) { 3504 dev_err(&octeon_dev->pci_dev->dev, 3505 "setting real number rx failed\n"); 3506 WRITE_ONCE(sc->caller_is_done, true); 3507 goto setup_nic_dev_free; 3508 } 3509 3510 retval = netif_set_real_num_tx_queues(netdev, num_iqueues); 3511 if (retval) { 3512 dev_err(&octeon_dev->pci_dev->dev, 3513 "setting real number tx failed\n"); 3514 WRITE_ONCE(sc->caller_is_done, true); 3515 goto setup_nic_dev_free; 3516 } 3517 3518 lio = GET_LIO(netdev); 3519 3520 memset(lio, 0, sizeof(struct lio)); 3521 3522 lio->ifidx = ifidx_or_pfnum; 3523 3524 props = &octeon_dev->props[i]; 3525 props->gmxport = resp->cfg_info.linfo.gmxport; 3526 props->netdev = netdev; 3527 3528 lio->linfo.num_rxpciq = num_oqueues; 3529 lio->linfo.num_txpciq = num_iqueues; 3530 for (j = 0; j < num_oqueues; j++) { 3531 lio->linfo.rxpciq[j].u64 = 3532 resp->cfg_info.linfo.rxpciq[j].u64; 3533 } 3534 for (j = 0; j < num_iqueues; j++) { 3535 lio->linfo.txpciq[j].u64 = 3536 resp->cfg_info.linfo.txpciq[j].u64; 3537 } 3538 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; 3539 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; 3540 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; 3541 3542 WRITE_ONCE(sc->caller_is_done, true); 3543 3544 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); 3545 3546 if (OCTEON_CN23XX_PF(octeon_dev) || 3547 OCTEON_CN6XXX(octeon_dev)) { 3548 lio->dev_capability = NETIF_F_HIGHDMA 3549 | NETIF_F_IP_CSUM 3550 | NETIF_F_IPV6_CSUM 3551 | NETIF_F_SG | NETIF_F_RXCSUM 3552 | NETIF_F_GRO 3553 | NETIF_F_TSO | NETIF_F_TSO6 3554 | NETIF_F_LRO; 3555 } 3556 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); 3557 3558 /* Copy of transmit encapsulation capabilities: 3559 * TSO, TSO6, Checksums for this device 3560 */ 3561 lio->enc_dev_capability = NETIF_F_IP_CSUM 3562 | NETIF_F_IPV6_CSUM 3563 | NETIF_F_GSO_UDP_TUNNEL 3564 | NETIF_F_HW_CSUM | NETIF_F_SG 3565 | NETIF_F_RXCSUM 3566 | NETIF_F_TSO | NETIF_F_TSO6 3567 | NETIF_F_LRO; 3568 3569 netdev->hw_enc_features = (lio->enc_dev_capability & 3570 ~NETIF_F_LRO); 3571 3572 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels; 3573 3574 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL; 3575 3576 netdev->vlan_features = lio->dev_capability; 3577 /* Add any unchangeable hw features */ 3578 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER | 3579 NETIF_F_HW_VLAN_CTAG_RX | 3580 NETIF_F_HW_VLAN_CTAG_TX; 3581 3582 netdev->features = (lio->dev_capability & ~NETIF_F_LRO); 3583 3584 netdev->hw_features = lio->dev_capability; 3585 /*HW_VLAN_RX and HW_VLAN_FILTER is always on*/ 3586 netdev->hw_features = netdev->hw_features & 3587 ~NETIF_F_HW_VLAN_CTAG_RX; 3588 3589 /* MTU range: 68 - 16000 */ 3590 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3591 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3592 3593 /* Point to the properties for octeon device to which this 3594 * interface belongs. 
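 * These back-pointers let the rest of the driver reach the octeon
 * device and the per-interface properties from the lio private area
 * without extra lookups.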
3595 */ 3596 lio->oct_dev = octeon_dev; 3597 lio->octprops = props; 3598 lio->netdev = netdev; 3599 3600 dev_dbg(&octeon_dev->pci_dev->dev, 3601 "if%d gmx: %d hw_addr: 0x%llx\n", i, 3602 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); 3603 3604 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) { 3605 u8 vfmac[ETH_ALEN]; 3606 3607 eth_random_addr(vfmac); 3608 if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) { 3609 dev_err(&octeon_dev->pci_dev->dev, 3610 "Error setting VF%d MAC address\n", 3611 j); 3612 goto setup_nic_dev_free; 3613 } 3614 } 3615 3616 /* 64-bit swap required on LE machines */ 3617 octeon_swap_8B_data(&lio->linfo.hw_addr, 1); 3618 for (j = 0; j < 6; j++) 3619 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); 3620 3621 /* Copy MAC Address to OS network device structure */ 3622 3623 ether_addr_copy(netdev->dev_addr, mac); 3624 3625 /* By default all interfaces on a single Octeon uses the same 3626 * tx and rx queues 3627 */ 3628 lio->txq = lio->linfo.txpciq[0].s.q_no; 3629 lio->rxq = lio->linfo.rxpciq[0].s.q_no; 3630 if (liquidio_setup_io_queues(octeon_dev, i, 3631 lio->linfo.num_txpciq, 3632 lio->linfo.num_rxpciq)) { 3633 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); 3634 goto setup_nic_dev_free; 3635 } 3636 3637 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); 3638 3639 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); 3640 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); 3641 3642 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { 3643 dev_err(&octeon_dev->pci_dev->dev, 3644 "Gather list allocation failed\n"); 3645 goto setup_nic_dev_free; 3646 } 3647 3648 /* Register ethtool support */ 3649 liquidio_set_ethtool_ops(netdev); 3650 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID) 3651 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT; 3652 else 3653 octeon_dev->priv_flags = 0x0; 3654 3655 if (netdev->features & NETIF_F_LRO) 3656 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE, 3657 OCTNIC_LROIPV4 | OCTNIC_LROIPV6); 3658 3659 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL, 3660 OCTNET_CMD_VLAN_FILTER_ENABLE); 3661 3662 if ((debug != -1) && (debug & NETIF_MSG_HW)) 3663 liquidio_set_feature(netdev, 3664 OCTNET_CMD_VERBOSE_ENABLE, 0); 3665 3666 if (setup_link_status_change_wq(netdev)) 3667 goto setup_nic_dev_free; 3668 3669 if ((octeon_dev->fw_info.app_cap_flags & 3670 LIQUIDIO_TIME_SYNC_CAP) && 3671 setup_sync_octeon_time_wq(netdev)) 3672 goto setup_nic_dev_free; 3673 3674 if (setup_rx_oom_poll_fn(netdev)) 3675 goto setup_nic_dev_free; 3676 3677 /* Register the network device with the OS */ 3678 if (register_netdev(netdev)) { 3679 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); 3680 goto setup_nic_dev_free; 3681 } 3682 3683 dev_dbg(&octeon_dev->pci_dev->dev, 3684 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", 3685 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); 3686 netif_carrier_off(netdev); 3687 lio->link_changes++; 3688 3689 ifstate_set(lio, LIO_IFSTATE_REGISTERED); 3690 3691 /* Sending command to firmware to enable Rx checksum offload 3692 * by default at the time of setup of Liquidio driver for 3693 * this device 3694 */ 3695 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL, 3696 OCTNET_CMD_RXCSUM_ENABLE); 3697 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL, 3698 OCTNET_CMD_TXCSUM_ENABLE); 3699 3700 dev_dbg(&octeon_dev->pci_dev->dev, 3701 "NIC ifidx:%d Setup successful\n", i); 3702 3703 if (octeon_dev->subsystem_id == 3704 OCTEON_CN2350_25GB_SUBSYS_ID || 3705 
octeon_dev->subsystem_id == 3706 OCTEON_CN2360_25GB_SUBSYS_ID) { 3707 cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj, 3708 octeon_dev->fw_info.ver.min, 3709 octeon_dev->fw_info.ver.rev); 3710 3711 /* speed control unsupported in f/w older than 1.7.2 */ 3712 if (cur_ver < OCT_FW_VER(1, 7, 2)) { 3713 dev_info(&octeon_dev->pci_dev->dev, 3714 "speed setting not supported by f/w."); 3715 octeon_dev->speed_setting = 25; 3716 octeon_dev->no_speed_setting = 1; 3717 } else { 3718 liquidio_get_speed(lio); 3719 } 3720 3721 if (octeon_dev->speed_setting == 0) { 3722 octeon_dev->speed_setting = 25; 3723 octeon_dev->no_speed_setting = 1; 3724 } 3725 } else { 3726 octeon_dev->no_speed_setting = 1; 3727 octeon_dev->speed_setting = 10; 3728 } 3729 octeon_dev->speed_boot = octeon_dev->speed_setting; 3730 3731 /* don't read FEC setting if unsupported by f/w (see above) */ 3732 if (octeon_dev->speed_boot == 25 && 3733 !octeon_dev->no_speed_setting) { 3734 liquidio_get_fec(lio); 3735 octeon_dev->props[lio->ifidx].fec_boot = 3736 octeon_dev->props[lio->ifidx].fec; 3737 } 3738 } 3739 3740 devlink = devlink_alloc(&liquidio_devlink_ops, 3741 sizeof(struct lio_devlink_priv)); 3742 if (!devlink) { 3743 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3744 goto setup_nic_dev_free; 3745 } 3746 3747 lio_devlink = devlink_priv(devlink); 3748 lio_devlink->oct = octeon_dev; 3749 3750 if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) { 3751 devlink_free(devlink); 3752 dev_err(&octeon_dev->pci_dev->dev, 3753 "devlink registration failed\n"); 3754 goto setup_nic_dev_free; 3755 } 3756 3757 octeon_dev->devlink = devlink; 3758 octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; 3759 3760 return 0; 3761 3762 setup_nic_dev_free: 3763 3764 while (i--) { 3765 dev_err(&octeon_dev->pci_dev->dev, 3766 "NIC ifidx:%d Setup failed\n", i); 3767 liquidio_destroy_nic_device(octeon_dev, i); 3768 } 3769 3770 setup_nic_dev_done: 3771 3772 return -ENODEV; 3773 } 3774 3775 #ifdef CONFIG_PCI_IOV 3776 static int octeon_enable_sriov(struct octeon_device *oct) 3777 { 3778 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced; 3779 struct pci_dev *vfdev; 3780 int err; 3781 u32 u; 3782 3783 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) { 3784 err = pci_enable_sriov(oct->pci_dev, 3785 oct->sriov_info.num_vfs_alloced); 3786 if (err) { 3787 dev_err(&oct->pci_dev->dev, 3788 "OCTEON: Failed to enable PCI sriov: %d\n", 3789 err); 3790 oct->sriov_info.num_vfs_alloced = 0; 3791 return err; 3792 } 3793 oct->sriov_info.sriov_enabled = 1; 3794 3795 /* init lookup table that maps DPI ring number to VF pci_dev 3796 * struct pointer 3797 */ 3798 u = 0; 3799 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3800 OCTEON_CN23XX_VF_VID, NULL); 3801 while (vfdev) { 3802 if (vfdev->is_virtfn && 3803 (vfdev->physfn == oct->pci_dev)) { 3804 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = 3805 vfdev; 3806 u += oct->sriov_info.rings_per_vf; 3807 } 3808 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, 3809 OCTEON_CN23XX_VF_VID, vfdev); 3810 } 3811 } 3812 3813 return num_vfs_alloced; 3814 } 3815 3816 static int lio_pci_sriov_disable(struct octeon_device *oct) 3817 { 3818 int u; 3819 3820 if (pci_vfs_assigned(oct->pci_dev)) { 3821 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n"); 3822 return -EPERM; 3823 } 3824 3825 pci_disable_sriov(oct->pci_dev); 3826 3827 u = 0; 3828 while (u < MAX_POSSIBLE_VFS) { 3829 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL; 3830 u += oct->sriov_info.rings_per_vf; 3831 } 3832 3833 
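	/* Only the anchor slots written by octeon_enable_sriov() above are
	 * populated (one per rings_per_vf stride); with a hypothetical
	 * rings_per_vf of 8 the loop clears entries 0, 8, 16, ...
	 */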
oct->sriov_info.num_vfs_alloced = 0; 3834 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n", 3835 oct->pf_num); 3836 3837 return 0; 3838 } 3839 3840 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs) 3841 { 3842 struct octeon_device *oct = pci_get_drvdata(dev); 3843 int ret = 0; 3844 3845 if ((num_vfs == oct->sriov_info.num_vfs_alloced) && 3846 (oct->sriov_info.sriov_enabled)) { 3847 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n", 3848 oct->pf_num, num_vfs); 3849 return 0; 3850 } 3851 3852 if (!num_vfs) { 3853 lio_vf_rep_destroy(oct); 3854 ret = lio_pci_sriov_disable(oct); 3855 } else if (num_vfs > oct->sriov_info.max_vfs) { 3856 dev_err(&oct->pci_dev->dev, 3857 "OCTEON: Max allowed VFs:%d user requested:%d", 3858 oct->sriov_info.max_vfs, num_vfs); 3859 ret = -EPERM; 3860 } else { 3861 oct->sriov_info.num_vfs_alloced = num_vfs; 3862 ret = octeon_enable_sriov(oct); 3863 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n", 3864 oct->pf_num, num_vfs); 3865 ret = lio_vf_rep_create(oct); 3866 if (ret) 3867 dev_info(&oct->pci_dev->dev, 3868 "vf representor create failed"); 3869 } 3870 3871 return ret; 3872 } 3873 #endif 3874 3875 /** 3876 * \brief initialize the NIC 3877 * @param oct octeon device 3878 * 3879 * This initialization routine is called once the Octeon device application is 3880 * up and running. 3881 */ 3882 static int liquidio_init_nic_module(struct octeon_device *oct) 3883 { 3884 int i, retval = 0; 3885 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); 3886 3887 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); 3888 3889 /* only default iq and oq were initialized 3890 * initialize the rest as well 3891 */ 3892 /* run port_config command for each port */ 3893 oct->ifcount = num_nic_ports; 3894 3895 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports); 3896 3897 for (i = 0; i < MAX_OCTEON_LINKS; i++) 3898 oct->props[i].gmxport = -1; 3899 3900 retval = setup_nic_devices(oct); 3901 if (retval) { 3902 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); 3903 goto octnet_init_failure; 3904 } 3905 3906 /* Call vf_rep_modinit if the firmware is switchdev capable 3907 * and do it from the first liquidio function probed. 3908 */ 3909 if (!oct->octeon_id && 3910 oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) { 3911 retval = lio_vf_rep_modinit(); 3912 if (retval) { 3913 liquidio_stop_nic_module(oct); 3914 goto octnet_init_failure; 3915 } 3916 } 3917 3918 liquidio_ptp_init(oct); 3919 3920 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n"); 3921 3922 return retval; 3923 3924 octnet_init_failure: 3925 3926 oct->ifcount = 0; 3927 3928 return retval; 3929 } 3930 3931 /** 3932 * \brief starter callback that invokes the remaining initialization work after 3933 * the NIC is up and running. 3934 * @param work kernel work_struct, embedded in the device's struct cavium_wk 3935 */ 3936 static void nic_starter(struct work_struct *work) 3937 { 3938 struct octeon_device *oct; 3939 struct cavium_wk *wk = (struct cavium_wk *)work; 3940 3941 oct = (struct octeon_device *)wk->ctxptr; 3942 3943 if (atomic_read(&oct->status) == OCT_DEV_RUNNING) 3944 return; 3945 3946 /* If the status of the device is CORE_OK, the core 3947 * application has reported its application type. Call 3948 * any registered handlers now and move to the RUNNING 3949 * state.
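 * CORE_OK is reported via the OPCODE_NIC_CORE_DRV_ACTIVE dispatch
 * (octeon_core_drv_init) registered in octeon_device_init(); until
 * then, this work item simply re-arms itself below.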
/**
 * \brief initialize the NIC
 * @param oct octeon device
 *
 * This initialization routine is called once the Octeon device application is
 * up and running
 */
static int liquidio_init_nic_module(struct octeon_device *oct)
{
	int i, retval = 0;
	int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));

	dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");

	/* Only the default iq and oq were initialized; initialize the rest
	 * as well.
	 */
	/* run port_config command for each port */
	oct->ifcount = num_nic_ports;

	memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);

	for (i = 0; i < MAX_OCTEON_LINKS; i++)
		oct->props[i].gmxport = -1;

	retval = setup_nic_devices(oct);
	if (retval) {
		dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
		goto octnet_init_failure;
	}

	/* Call vf_rep_modinit if the firmware is switchdev capable
	 * and do it from the first liquidio function probed.
	 */
	if (!oct->octeon_id &&
	    oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
		retval = lio_vf_rep_modinit();
		if (retval) {
			liquidio_stop_nic_module(oct);
			goto octnet_init_failure;
		}
	}

	liquidio_ptp_init(oct);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");

	return retval;

octnet_init_failure:

	oct->ifcount = 0;

	return retval;
}

/**
 * \brief starter callback that invokes the remaining initialization work after
 * the NIC is up and running.
 * @param work work struct
 */
static void nic_starter(struct work_struct *work)
{
	struct octeon_device *oct;
	struct cavium_wk *wk = (struct cavium_wk *)work;

	oct = (struct octeon_device *)wk->ctxptr;

	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
		return;

	/* If the status of the device is CORE_OK, the core
	 * application has reported its application type. Call
	 * any registered handlers now and move to the RUNNING
	 * state.
	 */
	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
		schedule_delayed_work(&oct->nic_poll_work.work,
				      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));
		return;
	}

	atomic_set(&oct->status, OCT_DEV_RUNNING);

	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");

		if (liquidio_init_nic_module(oct))
			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
		else
			handshake[oct->octeon_id].started_ok = 1;
	} else {
		dev_err(&oct->pci_dev->dev,
			"Unexpected application running on NIC (%d). Check firmware.\n",
			oct->app_mode);
	}

	complete(&handshake[oct->octeon_id].started);
}
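
/* In outline, the handshake that nic_starter() completes looks like:
 *
 *	liquidio_probe() -> octeon_device_init()
 *	    schedules nic_poll_work (nic_starter above)
 *	firmware app reports in -> status becomes OCT_DEV_CORE_OK
 *	nic_starter() -> liquidio_init_nic_module()
 *	    sets handshake[].started_ok and completes handshake[].started
 *
 * Until CORE_OK is seen, the callback simply re-arms itself every
 * LIQUIDIO_STARTER_POLL_INTERVAL_MS milliseconds.
 */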
static int
octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int i, notice, vf_idx;
	bool cores_crashed;
	u64 *data, vf_num;

	notice = recv_pkt->rh.r.ossp;
	data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);

	/* the first 64-bit word of data is the vf_num */
	vf_num = data[0];
	octeon_swap_8B_data(&vf_num, 1);
	vf_idx = (int)vf_num - 1;

	cores_crashed = READ_ONCE(oct->cores_crashed);

	if (notice == VF_DRV_LOADED) {
		if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
			oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was loaded\n", vf_idx);
			if (!cores_crashed)
				try_module_get(THIS_MODULE);
		}
	} else if (notice == VF_DRV_REMOVED) {
		if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
			oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
			dev_info(&oct->pci_dev->dev,
				 "driver for VF%d was removed\n", vf_idx);
			if (!cores_crashed)
				module_put(THIS_MODULE);
		}
	} else if (notice == VF_DRV_MACADDR_CHANGED) {
		u8 *b = (u8 *)&data[1];

		oct->sriov_info.vf_macaddr[vf_idx] = data[1];
		dev_info(&oct->pci_dev->dev,
			 "VF driver changed VF%d's MAC address to %pM\n",
			 vf_idx, b + 2);
	}

	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);

	return 0;
}

/**
 * \brief Device initialization for each Octeon device that is probed
 * @param octeon_dev octeon device
 */
static int octeon_device_init(struct octeon_device *octeon_dev)
{
	int j, ret;
	char bootcmd[] = "\n";
	char *dbg_enb = NULL;
	enum lio_fw_state fw_state;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)octeon_dev->priv;

	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);

	/* Enable access to the octeon device and make its DMA capability
	 * known to the OS.
	 */
	if (octeon_pci_os_setup(octeon_dev))
		return 1;

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);

	/* Identify the Octeon type and map the BAR address space. */
	if (octeon_chip_specific_setup(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);

	/* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
	 * since that is what is required for the reference to be removed
	 * during de-initialization (see 'octeon_destroy_resources').
	 */
	octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
			       PCI_SLOT(octeon_dev->pci_dev->devfn),
			       PCI_FUNC(octeon_dev->pci_dev->devfn),
			       true);

	octeon_dev->app_mode = CVM_DRV_INVALID_APP;

	/* CN23XX supports preloaded firmware if the following is true:
	 *
	 * The adapter indicates that firmware is currently running AND
	 * 'fw_type' is 'auto'.
	 *
	 * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
	 */
	if (OCTEON_CN23XX_PF(octeon_dev) &&
	    cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
		atomic_cmpxchg(octeon_dev->adapter_fw_state,
			       FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
	}

	/* If loading firmware, only first device of adapter needs to do so. */
	fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
				  FW_NEEDS_TO_BE_LOADED,
				  FW_IS_BEING_LOADED);

	/* Here, [local variable] 'fw_state' is set to one of:
	 *
	 * FW_IS_PRELOADED: No firmware is to be loaded (see above)
	 * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
	 *                        firmware to the adapter.
	 * FW_IS_BEING_LOADED: The driver's second instance will not load
	 *                     firmware to the adapter.
	 */
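
	/* Illustrative sequence for a two-PF adapter (both PFs share
	 * adapter_fw_state):
	 *
	 *	PF0's cmpxchg sees FW_NEEDS_TO_BE_LOADED, flips the state
	 *	to FW_IS_BEING_LOADED, and therefore resets the chip and
	 *	loads the firmware below;
	 *	PF1's cmpxchg then sees FW_IS_BEING_LOADED (or, if it
	 *	probes later, FW_HAS_BEEN_LOADED) and skips both steps.
	 */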
	/* Prior to f/w load, perform a soft reset of the Octeon device;
	 * if error resetting, return w/error.
	 */
	if (fw_state == FW_NEEDS_TO_BE_LOADED)
		if (octeon_dev->fn_list.soft_reset(octeon_dev))
			return 1;

	/* Initialize the dispatch mechanism used to push packets arriving on
	 * Octeon Output queues.
	 */
	if (octeon_init_dispatch_list(octeon_dev))
		return 1;

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_CORE_DRV_ACTIVE,
				    octeon_core_drv_init,
				    octeon_dev);

	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_VF_DRV_NOTICE,
				    octeon_recv_vf_drv_notice, octeon_dev);
	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
			      msecs_to_jiffies(LIQUIDIO_STARTER_POLL_INTERVAL_MS));

	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);

	if (octeon_set_io_queues_off(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
		return 1;
	}

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize soft command buffer pool */
	if (octeon_setup_sc_buffer_pool(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);

	/* Setup the data structures that manage this Octeon's Input queues. */
	if (octeon_setup_instr_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev,
			"instruction queue initialization failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);

	/* Initialize lists to manage the requests of different types that
	 * arrive from user & kernel applications for this octeon device.
	 */
	if (octeon_setup_response_list(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
		return 1;
	}
	atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);

	if (octeon_setup_output_queues(octeon_dev)) {
		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
		return 1;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);

	if (OCTEON_CN23XX_PF(octeon_dev)) {
		if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);

		if (octeon_allocate_ioq_vector(octeon_dev,
					       octeon_dev->sriov_info.num_pf_rings)) {
			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
			return 1;
		}
		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
	} else {
		/* The input and output queue registers were setup earlier (the
		 * queues were not enabled). Any additional registers
		 * that need to be programmed should be done now.
		 */
		ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to configure device registers\n");
			return ret;
		}
	}

	/* Initialize the tasklet that handles output queue packet processing.*/
	dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
	tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
		     (unsigned long)octeon_dev);

	/* Setup the interrupt handler and record the INT SUM register address */
	if (octeon_setup_interrupt(octeon_dev,
				   octeon_dev->sriov_info.num_pf_rings))
		return 1;

	/* Enable Octeon device interrupts */
	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);

	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);

	/* Send Credit for Octeon Output queues. Credits are always sent BEFORE
	 * the output queue is enabled.
	 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
	 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
	 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
	 * before any credits have been issued, causing the ring to be reset
	 * (and the f/w appear to never have started).
	 */
	for (j = 0; j < octeon_dev->num_oqs; j++)
		writel(octeon_dev->droq[j]->max_count,
		       octeon_dev->droq[j]->pkts_credit_reg);

	/* Enable the input and output queues for this Octeon device */
	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
	if (ret) {
		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
		return ret;
	}

	atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);

	if (fw_state == FW_NEEDS_TO_BE_LOADED) {
		dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
		if (!ddr_timeout) {
			dev_info(&octeon_dev->pci_dev->dev,
				 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
		}
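
		/* Example (hypothetical values): loading the module with
		 *
		 *	modprobe liquidio ddr_timeout=0
		 *
		 * parks this thread in the loop below until the parameter
		 * is given a non-zero value at runtime, e.g.
		 *
		 *	echo 10000 > /sys/module/liquidio/parameters/ddr_timeout
		 */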
		schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

		/* Wait for the octeon to initialize DDR after the soft-reset.*/
		while (!ddr_timeout) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (schedule_timeout(HZ / 10)) {
				/* user probably pressed Control-C */
				return 1;
			}
		}
		ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev,
				"DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
				ret);
			return 1;
		}

		if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
			dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
			return 1;
		}

		/* Divert uboot to take commands from host instead. */
		ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);

		dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
		ret = octeon_init_consoles(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
			return 1;
		}
		/* If console debug enabled, specify empty string to use default
		 * enablement ELSE specify NULL string for 'disabled'.
		 */
		dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
		ret = octeon_add_console(octeon_dev, 0, dbg_enb);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
			return 1;
		} else if (octeon_console_debug_enabled(0)) {
			/* If console was added AND we're logging console output
			 * then set our console print function.
			 */
			octeon_dev->console[0].print = octeon_dbg_console_print;
		}

		atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

		dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
		ret = load_firmware(octeon_dev);
		if (ret) {
			dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
			return 1;
		}

		atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
	}

	handshake[octeon_dev->octeon_id].init_ok = 1;
	complete(&handshake[octeon_dev->octeon_id].init);

	atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

	return 0;
}
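
/* Example: loading the module with console_bitmask=1 makes
 * octeon_console_debug_enabled(0) true, so output from console 0 is
 * redirected to the kernel log through octeon_dbg_console_print()
 * below.
 */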
/**
 * \brief Debug console print function
 * @param oct octeon device
 * @param console_num console number
 * @param prefix first portion of line to display
 * @param suffix second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' parameter and
 * 'suffix' parameter.
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix)
{
	if (prefix && suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
			 suffix);
	else if (prefix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
	else if (suffix)
		dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

	return 0;
}

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
	liquidio_deinit_pci();

	pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);